-rw-r--r-- .mailmap | 5
-rw-r--r-- CREDITS | 7
-rw-r--r-- Documentation/Changes | 9
-rw-r--r-- Documentation/DocBook/drm.tmpl | 83
-rw-r--r-- Documentation/DocBook/gadget.tmpl | 2
-rw-r--r-- Documentation/DocBook/genericirq.tmpl | 4
-rw-r--r-- Documentation/DocBook/kernel-locking.tmpl | 2
-rw-r--r-- Documentation/DocBook/libata.tmpl | 6
-rw-r--r-- Documentation/DocBook/media_api.tmpl | 2
-rw-r--r-- Documentation/DocBook/mtdnand.tmpl | 30
-rw-r--r-- Documentation/DocBook/regulator.tmpl | 2
-rw-r--r-- Documentation/DocBook/uio-howto.tmpl | 4
-rw-r--r-- Documentation/DocBook/usb.tmpl | 2
-rw-r--r-- Documentation/DocBook/writing-an-alsa-driver.tmpl | 2
-rw-r--r-- Documentation/acpi/enumeration.txt | 6
-rw-r--r-- Documentation/cpu-freq/intel-pstate.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/arm/exynos/power_domain.txt | 20
-rw-r--r-- Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/drm/armada/marvell,dove-lcd.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/drm/i2c/tda998x.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/drm/msm/gpu.txt | 52
-rw-r--r-- Documentation/devicetree/bindings/drm/msm/hdmi.txt | 46
-rw-r--r-- Documentation/devicetree/bindings/drm/msm/mdp.txt | 48
-rw-r--r-- Documentation/devicetree/bindings/gpu/st,stih4xx.txt | 189
-rw-r--r-- Documentation/devicetree/bindings/panel/auo,b133htn01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/panel/foxlink,fl500wvr00-a0t.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/panel/innolux,n116bge.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/panel/innolux,n156bge-l21.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/serial/renesas,sci-serial.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/video/exynos_dsim.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/video/exynos_mixer.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/video/samsung-fimd.txt | 28
-rw-r--r-- Documentation/input/event-codes.txt | 13
-rw-r--r-- Documentation/kernel-parameters.txt | 8
-rw-r--r-- Documentation/laptops/00-INDEX | 4
-rw-r--r-- Documentation/laptops/freefall.c (renamed from Documentation/laptops/hpfall.c) | 59
-rw-r--r-- MAINTAINERS | 44
-rw-r--r-- Makefile | 103
-rw-r--r-- arch/arm/Kconfig | 5
-rw-r--r-- arch/arm/boot/dts/am335x-evm.dts | 4
-rw-r--r-- arch/arm/boot/dts/am335x-evmsk.dts | 4
-rw-r--r-- arch/arm/boot/dts/am335x-igep0033.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/at91sam9n12.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/at91sam9x5.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/dra7-evm.dts | 1
-rw-r--r-- arch/arm/boot/dts/dra7xx-clocks.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/exynos4.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/exynos5.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/exynos5420.dtsi | 25
-rw-r--r-- arch/arm/boot/dts/hi3620.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/omap3-n900.dts | 2
-rw-r--r-- arch/arm/boot/dts/r8a7791.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/ste-nomadik-s8815.dts | 2
-rw-r--r-- arch/arm/boot/dts/ste-nomadik-stn8815.dtsi | 7
-rw-r--r-- arch/arm/crypto/aesbs-glue.c | 10
-rw-r--r-- arch/arm/include/asm/mach/arch.h | 1
-rw-r--r-- arch/arm/kernel/devtree.c | 8
-rw-r--r-- arch/arm/kernel/iwmmxt.S | 23
-rw-r--r-- arch/arm/kernel/kgdb.c | 4
-rw-r--r-- arch/arm/kernel/kprobes-test-arm.c | 30
-rw-r--r-- arch/arm/kernel/kprobes-test.c | 10
-rw-r--r-- arch/arm/kernel/probes-arm.c | 6
-rw-r--r-- arch/arm/kernel/topology.c | 2
-rw-r--r-- arch/arm/mach-exynos/exynos.c | 18
-rw-r--r-- arch/arm/mach-exynos/firmware.c | 9
-rw-r--r-- arch/arm/mach-exynos/hotplug.c | 10
-rw-r--r-- arch/arm/mach-exynos/platsmp.c | 34
-rw-r--r-- arch/arm/mach-exynos/pm_domains.c | 61
-rw-r--r-- arch/arm/mach-imx/clk-gate2.c | 31
-rw-r--r-- arch/arm/mach-imx/clk-imx6q.c | 4
-rw-r--r-- arch/arm/mach-mvebu/coherency.c | 6
-rw-r--r-- arch/arm/mach-mvebu/headsmp-a9.S | 9
-rw-r--r-- arch/arm/mach-mvebu/pmsu.c | 10
-rw-r--r-- arch/arm/mach-omap2/clkt_dpll.c | 2
-rw-r--r-- arch/arm/mach-omap2/cm-regbits-34xx.h | 3
-rw-r--r-- arch/arm/mach-omap2/common.h | 3
-rw-r--r-- arch/arm/mach-omap2/devices.c | 28
-rw-r--r-- arch/arm/mach-omap2/dsp.c | 10
-rw-r--r-- arch/arm/mach-omap2/gpmc-nand.c | 18
-rw-r--r-- arch/arm/mach-omap2/gpmc.c | 2
-rw-r--r-- arch/arm/mach-omap2/omap4-common.c | 4
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 18
-rw-r--r-- arch/arm/mach-omap2/prm-regbits-34xx.h | 6
-rw-r--r-- arch/arm/mm/cache-l2x0.c | 2
-rw-r--r-- arch/arm/mm/dma-mapping.c | 11
-rw-r--r-- arch/arm/mm/idmap.c | 12
-rw-r--r-- arch/arm/mm/mmu.c | 6
-rw-r--r-- arch/arm/xen/grant-table.c | 5
-rw-r--r-- arch/arm64/Kconfig | 1
-rw-r--r-- arch/arm64/crypto/aes-glue.c | 12
-rw-r--r-- arch/arm64/include/asm/memory.h | 2
-rw-r--r-- arch/arm64/kernel/efi-stub.c | 2
-rw-r--r-- arch/arm64/mm/copypage.c | 2
-rw-r--r-- arch/arm64/mm/init.c | 17
-rw-r--r-- arch/blackfin/configs/BF609-EZKIT_defconfig | 2
-rw-r--r-- arch/blackfin/kernel/vmlinux.lds.S | 2
-rw-r--r-- arch/blackfin/mach-bf533/boards/blackstamp.c | 1
-rw-r--r-- arch/blackfin/mach-bf537/boards/cm_bf537e.c | 1
-rw-r--r-- arch/blackfin/mach-bf537/boards/cm_bf537u.c | 1
-rw-r--r-- arch/blackfin/mach-bf537/boards/tcm_bf537.c | 1
-rw-r--r-- arch/blackfin/mach-bf548/boards/ezkit.c | 6
-rw-r--r-- arch/blackfin/mach-bf561/boards/acvilon.c | 1
-rw-r--r-- arch/blackfin/mach-bf561/boards/cm_bf561.c | 1
-rw-r--r-- arch/blackfin/mach-bf561/boards/ezkit.c | 1
-rw-r--r-- arch/blackfin/mach-bf609/boards/ezkit.c | 20
-rw-r--r-- arch/blackfin/mach-bf609/include/mach/pm.h | 5
-rw-r--r-- arch/blackfin/mach-bf609/pm.c | 4
-rw-r--r-- arch/blackfin/mach-common/ints-priority.c | 2
-rw-r--r-- arch/m68k/kernel/head.S | 3
-rw-r--r-- arch/m68k/kernel/time.c | 2
-rw-r--r-- arch/parisc/include/uapi/asm/signal.h | 2
-rw-r--r-- arch/parisc/kernel/hardware.c | 3
-rw-r--r-- arch/parisc/kernel/sys_parisc32.c | 46
-rw-r--r-- arch/parisc/kernel/syscall_table.S | 2
-rw-r--r-- arch/parisc/mm/init.c | 1
-rw-r--r-- arch/powerpc/Kconfig | 4
-rw-r--r-- arch/powerpc/include/asm/cputable.h | 1
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_64.h | 19
-rw-r--r-- arch/powerpc/include/asm/mmu-hash64.h | 3
-rw-r--r-- arch/powerpc/include/asm/mmu.h | 10
-rw-r--r-- arch/powerpc/include/asm/perf_event_server.h | 3
-rw-r--r-- arch/powerpc/include/asm/ppc_asm.h | 2
-rw-r--r-- arch/powerpc/kernel/cputable.c | 20
-rw-r--r-- arch/powerpc/kernel/idle_power7.S | 2
-rw-r--r-- arch/powerpc/kernel/rtas_flash.c | 6
-rw-r--r-- arch/powerpc/kernel/smp.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_hv.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_hv_interrupts.S | 5
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 7
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2
-rw-r--r-- arch/powerpc/kvm/book3s_interrupts.S | 4
-rw-r--r-- arch/powerpc/kvm/book3s_rmhandlers.S | 6
-rw-r--r-- arch/powerpc/kvm/book3s_rtas.c | 65
-rw-r--r-- arch/powerpc/kvm/e500_mmu_host.c | 3
-rw-r--r-- arch/powerpc/lib/mem_64.S | 2
-rw-r--r-- arch/powerpc/lib/sstep.c | 10
-rw-r--r-- arch/powerpc/mm/mmu_context_nohash.c | 12
-rw-r--r-- arch/powerpc/net/bpf_jit_comp.c | 10
-rw-r--r-- arch/powerpc/perf/core-book3s.c | 26
-rw-r--r-- arch/powerpc/perf/power8-pmu.c | 2
-rw-r--r-- arch/powerpc/platforms/cell/spu_syscalls.c | 2
-rw-r--r-- arch/powerpc/platforms/cell/spufs/Makefile | 3
-rw-r--r-- arch/powerpc/platforms/cell/spufs/syscalls.c | 6
-rw-r--r-- arch/powerpc/platforms/powernv/opal-elog.c | 4
-rw-r--r-- arch/powerpc/platforms/pseries/dlpar.c | 1
-rw-r--r-- arch/powerpc/platforms/pseries/reconfig.c | 1
-rw-r--r-- arch/s390/include/asm/switch_to.h | 4
-rw-r--r-- arch/s390/kernel/head.S | 6
-rw-r--r-- arch/s390/kernel/ptrace.c | 12
-rw-r--r-- arch/s390/pci/pci.c | 49
-rw-r--r-- arch/sh/Makefile | 3
-rw-r--r-- arch/sparc/Kconfig | 1
-rw-r--r-- arch/sparc/include/uapi/asm/unistd.h | 3
-rw-r--r-- arch/sparc/kernel/sys32.S | 1
-rw-r--r-- arch/sparc/kernel/systbls_32.S | 1
-rw-r--r-- arch/sparc/kernel/systbls_64.S | 2
-rw-r--r-- arch/um/kernel/tlb.c | 9
-rw-r--r-- arch/um/kernel/trap.c | 2
-rw-r--r-- arch/um/os-Linux/skas/process.c | 9
-rw-r--r-- arch/x86/Kconfig | 1
-rw-r--r-- arch/x86/boot/header.S | 26
-rw-r--r-- arch/x86/boot/tools/build.c | 38
-rw-r--r-- arch/x86/crypto/sha512_ssse3_glue.c | 2
-rw-r--r-- arch/x86/include/asm/irqflags.h | 2
-rw-r--r-- arch/x86/kernel/apm_32.c | 1
-rw-r--r-- arch/x86/kernel/cpu/intel.c | 22
-rw-r--r-- arch/x86/kernel/cpu/intel_cacheinfo.c | 12
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce.c | 10
-rw-r--r-- arch/x86/kernel/cpu/perf_event.c | 3
-rw-r--r-- arch/x86/kernel/cpu/perf_event.h | 12
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel.c | 78
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_ds.c | 6
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 11
-rw-r--r-- arch/x86/kernel/entry_32.S | 9
-rw-r--r-- arch/x86/kernel/entry_64.S | 28
-rw-r--r-- arch/x86/kernel/espfix_64.c | 5
-rw-r--r-- arch/x86/kernel/kprobes/core.c | 3
-rw-r--r-- arch/x86/kernel/paravirt_patch_64.c | 2
-rw-r--r-- arch/x86/kernel/tsc.c | 4
-rw-r--r-- arch/x86/kvm/x86.c | 12
-rw-r--r-- arch/x86/vdso/vdso2c.h | 3
-rw-r--r-- arch/x86/vdso/vma.c | 4
-rw-r--r-- arch/x86/xen/grant-table.c | 148
-rw-r--r-- arch/xtensa/kernel/vectors.S | 158
-rw-r--r-- arch/xtensa/kernel/vmlinux.lds.S | 4
-rw-r--r-- arch/xtensa/mm/init.c | 2
-rw-r--r-- block/blk-cgroup.c | 7
-rw-r--r-- block/blk-tag.c | 33
-rw-r--r-- block/compat_ioctl.c | 1
-rw-r--r-- crypto/af_alg.c | 2
-rw-r--r-- drivers/acpi/ac.c | 130
-rw-r--r-- drivers/acpi/acpi_pnp.c | 2
-rw-r--r-- drivers/acpi/battery.c | 41
-rw-r--r-- drivers/acpi/ec.c | 164
-rw-r--r-- drivers/acpi/resource.c | 10
-rw-r--r-- drivers/acpi/video.c | 21
-rw-r--r-- drivers/acpi/video_detect.c | 8
-rw-r--r-- drivers/ata/ahci.c | 1
-rw-r--r-- drivers/ata/ahci.h | 2
-rw-r--r-- drivers/ata/ahci_imx.c | 38
-rw-r--r-- drivers/ata/ahci_platform.c | 2
-rw-r--r-- drivers/ata/ahci_xgene.c | 60
-rw-r--r-- drivers/ata/libahci.c | 7
-rw-r--r-- drivers/ata/libahci_platform.c | 7
-rw-r--r-- drivers/ata/libata-core.c | 12
-rw-r--r-- drivers/ata/libata-eh.c | 9
-rw-r--r-- drivers/ata/pata_ep93xx.c | 2
-rw-r--r-- drivers/base/component.c | 192
-rw-r--r-- drivers/base/platform.c | 18
-rw-r--r-- drivers/block/drbd/drbd_nl.c | 6
-rw-r--r-- drivers/block/zram/zram_drv.c | 22
-rw-r--r-- drivers/bluetooth/ath3k.c | 2
-rw-r--r-- drivers/bluetooth/btusb.c | 1
-rw-r--r-- drivers/bluetooth/hci_h5.c | 1
-rw-r--r-- drivers/char/agp/frontend.c | 15
-rw-r--r-- drivers/char/hw_random/core.c | 47
-rw-r--r-- drivers/char/hw_random/virtio-rng.c | 10
-rw-r--r-- drivers/char/i8k.c | 4
-rw-r--r-- drivers/char/random.c | 17
-rw-r--r-- drivers/clk/clk-s2mps11.c | 7
-rw-r--r-- drivers/clk/qcom/mmcc-msm8960.c | 2
-rw-r--r-- drivers/clk/samsung/clk-exynos4.c | 16
-rw-r--r-- drivers/clk/samsung/clk-exynos5250.c | 2
-rw-r--r-- drivers/clk/samsung/clk-exynos5420.c | 91
-rw-r--r-- drivers/clk/samsung/clk-s3c2410.c | 9
-rw-r--r-- drivers/clk/samsung/clk-s3c64xx.c | 6
-rw-r--r-- drivers/clk/spear/spear3xx_clock.c | 16
-rw-r--r-- drivers/clk/sunxi/clk-sun6i-apb0-gates.c | 2
-rw-r--r-- drivers/clk/ti/apll.c | 8
-rw-r--r-- drivers/clk/ti/clk-7xx.c | 7
-rw-r--r-- drivers/clk/ti/dpll.c | 5
-rw-r--r-- drivers/clk/ti/mux.c | 2
-rw-r--r-- drivers/clocksource/exynos_mct.c | 20
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 3
-rw-r--r-- drivers/cpufreq/Makefile | 2
-rw-r--r-- drivers/cpufreq/cpufreq-cpu0.c | 7
-rw-r--r-- drivers/cpufreq/cpufreq.c | 6
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 35
-rw-r--r-- drivers/cpufreq/sa1110-cpufreq.c | 2
-rw-r--r-- drivers/crypto/caam/jr.c | 8
-rw-r--r-- drivers/dma/cppi41.c | 13
-rw-r--r-- drivers/dma/imx-sdma.c | 22
-rw-r--r-- drivers/firewire/Kconfig | 1
-rw-r--r-- drivers/firewire/ohci.c | 4
-rw-r--r-- drivers/firmware/efi/efi.c | 22
-rw-r--r-- drivers/firmware/efi/fdt.c | 10
-rw-r--r-- drivers/gpio/gpio-mcp23s08.c | 6
-rw-r--r-- drivers/gpio/gpio-rcar.c | 1
-rw-r--r-- drivers/gpu/drm/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/Makefile | 8
-rw-r--r-- drivers/gpu/drm/armada/armada_510.c | 23
-rw-r--r-- drivers/gpu/drm/armada/armada_crtc.c | 187
-rw-r--r-- drivers/gpu/drm/armada/armada_crtc.h | 11
-rw-r--r-- drivers/gpu/drm/armada/armada_drm.h | 13
-rw-r--r-- drivers/gpu/drm/armada/armada_drv.c | 245
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 12
-rw-r--r-- drivers/gpu/drm/bochs/bochs_drv.c | 2
-rw-r--r-- drivers/gpu/drm/bochs/bochs_fbdev.c | 2
-rw-r--r-- drivers/gpu/drm/bochs/bochs_kms.c | 17
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.c | 2
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.h | 2
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_mode.c | 14
-rw-r--r-- drivers/gpu/drm/drm_buffer.c | 6
-rw-r--r-- drivers/gpu/drm/drm_bufs.c | 17
-rw-r--r-- drivers/gpu/drm/drm_context.c | 102
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 557
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 2715
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 1186
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 11
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 53
-rw-r--r-- drivers/gpu/drm/drm_fops.c | 89
-rw-r--r-- drivers/gpu/drm/drm_gem_cma_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_info.c | 2
-rw-r--r-- drivers/gpu/drm/drm_ioctl.c | 372
-rw-r--r-- drivers/gpu/drm/drm_legacy.h | 51
-rw-r--r-- drivers/gpu/drm/drm_lock.c | 3
-rw-r--r-- drivers/gpu/drm/drm_mipi_dsi.c | 14
-rw-r--r-- drivers/gpu/drm/drm_of.c | 67
-rw-r--r-- drivers/gpu/drm/drm_plane_helper.c | 7
-rw-r--r-- drivers/gpu/drm/drm_rect.c | 140
-rw-r--r-- drivers/gpu/drm/drm_stub.c | 797
-rw-r--r-- drivers/gpu/drm/drm_sysfs.c | 90
-rw-r--r-- drivers/gpu/drm/exynos/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_dp_core.c | 115
-rw-r--r-- drivers/gpu/drm/exynos/exynos_dp_core.h | 3
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_connector.c | 15
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 15
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_crtc.h | 7
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dpi.c | 8
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.h | 10
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 283
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 277
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 29
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.c | 3
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_ipp.c | 259
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_ipp.h | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_rotator.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 36
-rw-r--r-- drivers/gpu/drm/exynos/exynos_mixer.c | 57
-rw-r--r-- drivers/gpu/drm/i2c/tda998x_drv.c | 390
-rw-r--r-- drivers/gpu/drm/i915/Kconfig | 12
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_cmd_parser.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 425
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 75
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 72
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 189
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 110
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 149
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 312
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 55
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_render_state.c | 161
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 148
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_userptr.c | 149
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 89
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 631
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 15
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 503
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 57
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 36
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 474
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 1475
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 720
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_mst.c | 548
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 156
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.c | 55
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_cmd.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 32
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 100
-rw-r--r-- drivers/gpu/drm/i915/intel_i2c.c | 54
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 25
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 12
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 22
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 176
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 897
-rw-r--r-- drivers/gpu/drm/i915/intel_renderstate.h | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_renderstate_gen6.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_renderstate_gen7.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_renderstate_gen8.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 444
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 90
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 36
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 212
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.h | 2
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 14
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/msm/adreno/a2xx.xml.h | 58
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx.xml.h | 296
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 7
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.h | 5
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 56
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 14
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 239
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.xml.h | 4
-rw-r--r-- drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 4
-rw-r--r-- drivers/gpu/drm/msm/dsi/sfpb.xml.h | 4
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.c | 69
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.h | 1
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 109
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 23
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 377
-rw-r--r-- drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 4
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | 4
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 25
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 1
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 431
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 159
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 25
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp_common.xml.h | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 91
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c | 45
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 15
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.c | 12
-rw-r--r-- drivers/gpu/drm/msm/msm_iommu.c | 31
-rw-r--r-- drivers/gpu/drm/msm/msm_mmu.h | 8
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h | 4
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 17
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.c | 3
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 6
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.h | 7
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem.c | 10
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_plane.c | 21
-rw-r--r-- drivers/gpu/drm/panel/Kconfig | 7
-rw-r--r-- drivers/gpu/drm/panel/panel-ld9040.c | 21
-rw-r--r-- drivers/gpu/drm/panel/panel-s6e8aa0.c | 29
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 203
-rw-r--r-- drivers/gpu/drm/qxl/qxl_irq.c | 3
-rw-r--r-- drivers/gpu/drm/qxl/qxl_object.h | 4
-rw-r--r-- drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/atombios_dp.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/atombios_encoders.c | 26
-rw-r--r-- drivers/gpu/drm/radeon/ci_dpm.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/ci_smc.c | 39
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 730
-rw-r--r-- drivers/gpu/drm/radeon/cik_sdma.c | 247
-rw-r--r-- drivers/gpu/drm/radeon/dce6_afmt.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 38
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_reg.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/ni.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/ni_dma.c | 178
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 62
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 13
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 25
-rw-r--r-- drivers/gpu/drm/radeon/r600_cs.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 145
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.c | 79
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 77
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 83
-rw-r--r-- drivers/gpu/drm/radeon/radeon_benchmark.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 382
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 23
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 109
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 260
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/radeon_encoders.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fb.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gart.c | 38
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 58
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ib.c | 319
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 39
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 63
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.h | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/radeon_prime.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ring.c | 287
-rw-r--r-- drivers/gpu/drm/radeon/radeon_sa.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_test.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/radeon_trace.h | 22
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ucode.c | 167
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ucode.h | 71
-rw-r--r-- drivers/gpu/drm/radeon/radeon_uvd.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vce.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vm.c | 329
-rw-r--r-- drivers/gpu/drm/radeon/rs400.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 17
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/rv770.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/rv770_dpm.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 467
-rw-r--r-- drivers/gpu/drm/radeon/si_dma.c | 172
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 152
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.h | 5
-rw-r--r-- drivers/gpu/drm/radeon/si_smc.c | 62
-rw-r--r-- drivers/gpu/drm/radeon/sislands_smc.h | 3
-rw-r--r-- drivers/gpu/drm/radeon/trinity_dpm.c | 15
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 2
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 13
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/sti/Kconfig | 14
-rw-r--r-- drivers/gpu/drm/sti/Makefile | 21
-rw-r--r-- drivers/gpu/drm/sti/NOTES | 58
-rw-r--r-- drivers/gpu/drm/sti/sti_compositor.c | 281
-rw-r--r-- drivers/gpu/drm/sti/sti_compositor.h | 90
-rw-r--r-- drivers/gpu/drm/sti/sti_drm_crtc.c | 421
-rw-r--r-- drivers/gpu/drm/sti/sti_drm_crtc.h | 22
-rw-r--r-- drivers/gpu/drm/sti/sti_drm_drv.c | 241
-rw-r--r-- drivers/gpu/drm/sti/sti_drm_drv.h | 29
-rw-r--r-- drivers/gpu/drm/sti/sti_drm_plane.c | 195
-rw-r--r-- drivers/gpu/drm/sti/sti_drm_plane.h | 18
-rw-r--r-- drivers/gpu/drm/sti/sti_gdp.c | 549
-rw-r--r-- drivers/gpu/drm/sti/sti_gdp.h | 16
-rw-r--r-- drivers/gpu/drm/sti/sti_hda.c | 794
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi.c | 810
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi.h | 88
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c | 336
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h | 14
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c | 211
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h | 14
-rw-r--r-- drivers/gpu/drm/sti/sti_layer.c | 197
-rw-r--r-- drivers/gpu/drm/sti/sti_layer.h | 123
-rw-r--r-- drivers/gpu/drm/sti/sti_mixer.c | 249
-rw-r--r-- drivers/gpu/drm/sti/sti_mixer.h | 54
-rw-r--r-- drivers/gpu/drm/sti/sti_tvout.c | 648
-rw-r--r-- drivers/gpu/drm/sti/sti_vid.c | 138
-rw-r--r-- drivers/gpu/drm/sti/sti_vid.h | 12
-rw-r--r-- drivers/gpu/drm/sti/sti_vtac.c | 215
-rw-r--r-- drivers/gpu/drm/sti/sti_vtg.c | 366
-rw-r--r-- drivers/gpu/drm/sti/sti_vtg.h | 28
-rw-r--r-- drivers/gpu/drm/tegra/output.c | 2
-rw-r--r-- drivers/gpu/drm/tilcdc/Module.symvers | 0
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_panel.c | 2
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_slave.c | 2
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_module.c | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_page_alloc.c | 31
-rw-r--r-- drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 34
-rw-r--r-- drivers/gpu/drm/udl/udl_connector.c | 9
-rw-r--r-- drivers/gpu/drm/udl/udl_main.c | 15
-rw-r--r-- drivers/gpu/drm/udl/udl_modeset.c | 21
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2
-rw-r--r-- drivers/hid/Kconfig | 2
-rw-r--r-- drivers/hid/hid-ids.h | 3
-rw-r--r-- drivers/hid/hid-rmi.c | 2
-rw-r--r-- drivers/hid/hid-sensor-hub.c | 25
-rw-r--r-- drivers/hid/usbhid/hid-quirks.c | 3
-rw-r--r-- drivers/hv/connection.c | 8
-rw-r--r-- drivers/hv/hv_fcopy.c | 2
-rw-r--r-- drivers/hv/hv_kvp.c | 17
-rw-r--r-- drivers/hv/hv_util.c | 2
-rw-r--r-- drivers/hwmon/adc128d818.c | 28
-rw-r--r-- drivers/hwmon/adm1021.c | 14
-rw-r--r-- drivers/hwmon/adm1029.c | 3
-rw-r--r-- drivers/hwmon/adm1031.c | 8
-rw-r--r-- drivers/hwmon/adt7470.c | 6
-rw-r--r-- drivers/hwmon/amc6821.c | 2
-rw-r--r-- drivers/hwmon/da9052-hwmon.c | 2
-rw-r--r-- drivers/hwmon/da9055-hwmon.c | 2
-rw-r--r-- drivers/hwmon/emc2103.c | 15
-rw-r--r-- drivers/hwmon/ntc_thermistor.c | 2
-rw-r--r-- drivers/hwmon/smsc47m192.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-sun6i-p2wi.c | 1
-rw-r--r-- drivers/i2c/muxes/Kconfig | 1
-rw-r--r-- drivers/ide/Kconfig | 5
-rw-r--r-- drivers/ide/ide-probe.c | 8
-rw-r--r-- drivers/iio/accel/bma180.c | 8
-rw-r--r-- drivers/iio/accel/hid-sensor-accel-3d.c | 7
-rw-r--r-- drivers/iio/accel/mma8452.c | 8
-rw-r--r-- drivers/iio/adc/ti_am335x_adc.c | 2
-rw-r--r-- drivers/iio/gyro/hid-sensor-gyro-3d.c | 7
-rw-r--r-- drivers/iio/industrialio-buffer.c | 2
-rw-r--r-- drivers/iio/industrialio-event.c | 3
-rw-r--r-- drivers/iio/light/hid-sensor-als.c | 7
-rw-r--r-- drivers/iio/light/hid-sensor-prox.c | 7
-rw-r--r-- drivers/iio/light/tcs3472.c | 11
-rw-r--r-- drivers/iio/magnetometer/hid-sensor-magn-3d.c | 7
-rw-r--r-- drivers/iio/pressure/hid-sensor-press.c | 7
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 14
-rw-r--r-- drivers/infiniband/hw/cxgb4/device.c | 18
-rw-r--r-- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 2
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 2
-rw-r--r-- drivers/input/input.c | 6
-rw-r--r-- drivers/input/keyboard/st-keyscan.c | 2
-rw-r--r-- drivers/input/misc/sirfsoc-onkey.c | 2
-rw-r--r-- drivers/input/mouse/synaptics.c | 5
-rw-r--r-- drivers/input/serio/i8042-x86ia64io.h | 7
-rw-r--r-- drivers/input/tablet/wacom_wac.c | 28
-rw-r--r-- drivers/input/touchscreen/ti_am335x_tsc.c | 5
-rw-r--r-- drivers/iommu/fsl_pamu.c | 8
-rw-r--r-- drivers/iommu/fsl_pamu_domain.c | 18
-rw-r--r-- drivers/irqchip/irq-gic.c | 7
-rw-r--r-- drivers/isdn/gigaset/bas-gigaset.c | 1
-rw-r--r-- drivers/isdn/hisax/l3ni1.c | 14
-rw-r--r-- drivers/isdn/i4l/isdn_ppp.c | 28
-rw-r--r-- drivers/md/dm-bufio.c | 2
-rw-r--r-- drivers/md/dm-cache-metadata.c | 9
-rw-r--r-- drivers/md/dm-cache-target.c | 13
-rw-r--r-- drivers/md/dm-crypt.c | 4
-rw-r--r-- drivers/md/dm-io.c | 22
-rw-r--r-- drivers/md/dm-mpath.c | 5
-rw-r--r-- drivers/md/dm-thin-metadata.c | 9
-rw-r--r-- drivers/md/dm-zero.c | 4
-rw-r--r-- drivers/md/dm.c | 15
-rw-r--r-- drivers/media/dvb-frontends/si2168.c | 16
-rw-r--r-- drivers/media/dvb-frontends/si2168_priv.h | 2
-rw-r--r-- drivers/media/dvb-frontends/tda10071.c | 12
-rw-r--r-- drivers/media/dvb-frontends/tda10071_priv.h | 1
-rw-r--r-- drivers/media/pci/saa7134/saa7134-empress.c | 2
-rw-r--r-- drivers/media/platform/davinci/vpif_capture.c | 1
-rw-r--r-- drivers/media/platform/davinci/vpif_display.c | 1
-rw-r--r-- drivers/media/tuners/si2157.c | 2
-rw-r--r-- drivers/media/usb/dvb-usb-v2/af9035.c | 40
-rw-r--r-- drivers/media/usb/gspca/pac7302.c | 1
-rw-r--r-- drivers/media/usb/hdpvr/hdpvr-video.c | 6
-rw-r--r-- drivers/media/v4l2-core/v4l2-dv-timings.c | 4
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0001.c | 43
-rw-r--r-- drivers/mtd/devices/elm.c | 2
-rw-r--r-- drivers/mtd/nand/nand_base.c | 6
-rw-r--r-- drivers/mtd/ubi/fastmap.c | 4
-rw-r--r-- drivers/net/bonding/bond_main.c | 2
-rw-r--r-- drivers/net/can/c_can/c_can_platform.c | 3
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 43
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 21
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.c | 7
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_defines.h | 18
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_hw.h | 3
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_i210.c | 66
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_i210.h | 12
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_regs.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 16
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cq.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 34
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 69
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mr.c | 19
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 27
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 2
-rw-r--r-- drivers/net/ethernet/sun/sunvnet.c | 42
-rw-r--r-- drivers/net/fddi/defxx.c | 17
-rw-r--r-- drivers/net/hyperv/netvsc.c | 4
-rw-r--r-- drivers/net/phy/dp83640.c | 6
-rw-r--r-- drivers/net/phy/mdio_bus.c | 45
-rw-r--r-- drivers/net/phy/phy_device.c | 15
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 30
-rw-r--r-- drivers/net/ppp/pppoe.c | 2
-rw-r--r-- drivers/net/usb/cdc_ether.c | 16
-rw-r--r-- drivers/net/usb/hso.c | 50
-rw-r--r-- drivers/net/usb/huawei_cdc_ncm.c | 3
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 3
-rw-r--r-- drivers/net/usb/r8152.c | 14
-rw-r--r-- drivers/net/usb/smsc95xx.c | 14
-rw-r--r-- drivers/net/vxlan.c | 2
-rw-r--r-- drivers/net/wan/farsync.c | 112
-rw-r--r-- drivers/net/wan/x25_asy.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_rx.c | 18
-rw-r--r-- drivers/net/wireless/ath/ath9k/xmit.c | 9
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/usb.c | 5
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/rxon.c | 12
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-fw.h | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 20
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 12
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/scan.c | 65
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/drv.c | 3
-rw-r--r-- drivers/net/wireless/mwifiex/11n_aggr.c | 1
-rw-r--r-- drivers/net/wireless/mwifiex/cfg80211.c | 1
-rw-r--r-- drivers/net/wireless/mwifiex/cmdevt.c | 1
-rw-r--r-- drivers/net/wireless/mwifiex/main.c | 1
-rw-r--r-- drivers/net/wireless/mwifiex/sta_tx.c | 1
-rw-r--r-- drivers/net/wireless/mwifiex/tdls.c | 2
-rw-r--r-- drivers/net/wireless/mwifiex/txrx.c | 1
-rw-r--r-- drivers/net/wireless/mwifiex/uap_txrx.c | 1
-rw-r--r-- drivers/net/wireless/rt2x00/rt2800usb.c | 28
-rw-r--r-- drivers/net/xen-netback/netback.c | 86
-rw-r--r-- drivers/net/xen-netfront.c | 27
-rw-r--r-- drivers/of/fdt.c | 66
-rw-r--r-- drivers/of/of_mdio.c | 34
-rw-r--r-- drivers/parport/Kconfig | 12
-rw-r--r-- drivers/pci/pci.c | 9
-rw-r--r-- drivers/phy/Kconfig | 2
-rw-r--r-- drivers/phy/phy-core.c | 7
-rw-r--r-- drivers/phy/phy-omap-usb2.c | 11
-rw-r--r-- drivers/phy/phy-samsung-usb2.c | 1
-rw-r--r-- drivers/pinctrl/berlin/berlin.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-st.c | 2
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sunxi.c | 4
-rw-r--r-- drivers/pnp/pnpacpi/core.c | 3
-rw-r--r-- drivers/rapidio/devices/tsi721_dma.c | 8
-rw-r--r-- drivers/s390/char/raw3270.c | 1
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 9
-rw-r--r-- drivers/scsi/scsi_lib.c | 8
-rw-r--r-- drivers/staging/imx-drm/imx-drm-core.c | 2
-rw-r--r-- drivers/staging/media/omap4iss/Kconfig | 2
-rw-r--r-- drivers/staging/rtl8723au/os_dep/usb_intf.c | 4
-rw-r--r-- drivers/staging/vt6655/bssdb.c | 2
-rw-r--r-- drivers/staging/vt6655/device_main.c | 7
-rw-r--r-- drivers/thermal/imx_thermal.c | 18
-rw-r--r-- drivers/thermal/of-thermal.c | 7
-rw-r--r-- drivers/thermal/thermal_hwmon.c | 33
-rw-r--r-- drivers/thermal/ti-soc-thermal/ti-bandgap.c | 2
-rw-r--r-- drivers/tty/serial/arc_uart.c | 2
-rw-r--r-- drivers/tty/serial/imx.c | 3
-rw-r--r-- drivers/tty/serial/ip22zilog.c | 2
-rw-r--r-- drivers/tty/serial/m32r_sio.c | 8
-rw-r--r-- drivers/tty/serial/pmac_zilog.c | 3
-rw-r--r-- drivers/tty/serial/sunsab.c | 3
-rw-r--r-- drivers/tty/serial/sunzilog.c | 2
-rw-r--r-- drivers/usb/chipidea/udc.c | 4
-rw-r--r-- drivers/usb/core/hub.c | 19
-rw-r--r-- drivers/usb/serial/cp210x.c | 1
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 5
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 9
-rw-r--r-- drivers/usb/serial/option.c | 2
-rw-r--r-- drivers/xen/balloon.c | 12
-rw-r--r-- drivers/xen/grant-table.c | 9
-rw-r--r-- drivers/xen/manage.c | 5
-rw-r--r-- firmware/Makefile | 6
-rw-r--r-- fs/afs/main.c | 4
-rw-r--r-- fs/aio.c | 7
-rw-r--r-- fs/btrfs/ordered-data.c | 11
-rw-r--r-- fs/btrfs/volumes.c | 8
-rw-r--r-- fs/coredump.c | 2
-rw-r--r-- fs/direct-io.c | 23
-rw-r--r-- fs/ext4/extents_status.c | 4
-rw-r--r-- fs/ext4/ialloc.c | 16
-rw-r--r-- fs/ext4/mballoc.c | 4
-rw-r--r-- fs/ext4/super.c | 60
-rw-r--r-- fs/f2fs/data.c | 23
-rw-r--r-- fs/f2fs/dir.c | 2
-rw-r--r-- fs/f2fs/f2fs.h | 6
-rw-r--r-- fs/f2fs/file.c | 12
-rw-r--r-- fs/f2fs/inode.c | 1
-rw-r--r-- fs/f2fs/namei.c | 13
-rw-r--r-- fs/f2fs/node.c | 2
-rw-r--r-- fs/f2fs/segment.c | 5
-rw-r--r-- fs/f2fs/super.c | 4
-rw-r--r-- fs/fuse/dev.c | 51
-rw-r--r-- fs/fuse/dir.c | 41
-rw-r--r-- fs/fuse/file.c | 8
-rw-r--r-- fs/fuse/inode.c | 27
-rw-r--r-- fs/gfs2/file.c | 4
-rw-r--r-- fs/gfs2/glock.c | 14
-rw-r--r-- fs/gfs2/glops.c | 4
-rw-r--r-- fs/gfs2/lock_dlm.c | 4
-rw-r--r-- fs/gfs2/rgrp.c | 4
-rw-r--r-- fs/jbd2/transaction.c | 5
-rw-r--r-- fs/kernfs/mount.c | 30
-rw-r--r-- fs/namei.c | 3
-rw-r--r-- fs/nfs/direct.c | 2
-rw-r--r-- fs/nfs/internal.h | 1
-rw-r--r-- fs/nfs/nfs3acl.c | 43
-rw-r--r-- fs/nfs/nfs3proc.c | 4
-rw-r--r-- fs/nfs/pagelist.c | 20
-rw-r--r-- fs/nfs/write.c | 335
-rw-r--r-- fs/nfsd/nfs4xdr.c | 6
-rw-r--r-- fs/open.c | 5
-rw-r--r-- fs/quota/dquot.c | 2
-rw-r--r-- fs/xattr.c | 2
-rw-r--r-- fs/xfs/xfs_bmap.c | 7
-rw-r--r-- fs/xfs/xfs_bmap.h | 4
-rw-r--r-- fs/xfs/xfs_bmap_util.c | 53
-rw-r--r-- fs/xfs/xfs_bmap_util.h | 4
-rw-r--r-- fs/xfs/xfs_btree.c | 82
-rw-r--r-- fs/xfs/xfs_iomap.c | 3
-rw-r--r-- fs/xfs/xfs_sb.c | 25
-rw-r--r-- include/acpi/video.h | 2
-rw-r--r-- include/asm-generic/vmlinux.lds.h | 2
-rw-r--r-- include/drm/drmP.h | 94
-rw-r--r-- include/drm/drm_crtc.h | 31
-rw-r--r-- include/drm/drm_dp_mst_helper.h | 509
-rw-r--r-- include/drm/drm_fb_helper.h | 4
-rw-r--r-- include/drm/drm_mipi_dsi.h | 19
-rw-r--r-- include/drm/drm_of.h | 18
-rw-r--r-- include/drm/drm_panel.h | 58
-rw-r--r-- include/drm/drm_rect.h | 6
-rw-r--r-- include/dt-bindings/clock/exynos5420.h | 3
-rw-r--r-- include/dt-bindings/pinctrl/dra.h | 7
-rw-r--r-- include/linux/component.h | 7
-rw-r--r-- include/linux/cpufreq.h | 4
-rw-r--r-- include/linux/hugetlb.h | 1
-rw-r--r-- include/linux/kernfs.h | 1
-rw-r--r-- include/linux/libata.h | 1
-rw-r--r-- include/linux/mlx4/device.h | 4
-rw-r--r-- include/linux/mutex.h | 4
-rw-r--r-- include/linux/of_fdt.h | 3
-rw-r--r-- include/linux/of_mdio.h | 8
-rw-r--r-- include/linux/osq_lock.h | 27
-rw-r--r-- include/linux/pagemap.h | 12
-rw-r--r-- include/linux/percpu-defs.h | 4
-rw-r--r-- include/linux/rcupdate.h | 46
-rw-r--r-- include/linux/rwsem-spinlock.h | 8
-rw-r--r-- include/linux/rwsem.h | 34
-rw-r--r-- include/linux/sched.h | 8
-rw-r--r-- include/net/ip.h | 11
-rw-r--r-- include/net/neighbour.h | 1
-rw-r--r-- include/net/netfilter/nf_tables.h | 6
-rw-r--r-- include/net/netns/ieee802154_6lowpan.h | 2
-rw-r--r-- include/net/netns/nftables.h | 2
-rw-r--r-- include/net/sock.h | 12
-rw-r--r-- include/uapi/drm/drm_mode.h | 5
-rw-r--r-- include/uapi/drm/radeon_drm.h | 4
-rw-r--r-- include/uapi/linux/fuse.h | 3
-rw-r--r-- include/video/samsung_fimd.h | 3
-rw-r--r-- include/xen/grant_table.h | 1
-rw-r--r-- kernel/Kconfig.locks | 9
-rw-r--r-- kernel/cgroup.c | 58
-rw-r--r-- kernel/cpuset.c | 20
-rw-r--r-- kernel/events/core.c | 34
-rw-r--r-- kernel/kexec.c | 4
-rw-r--r-- kernel/kprobes.c | 14
-rw-r--r-- kernel/locking/mcs_spinlock.c | 64
-rw-r--r-- kernel/locking/mcs_spinlock.h | 9
-rw-r--r-- kernel/locking/mutex.c | 2
-rw-r--r-- kernel/locking/rwsem-spinlock.c | 28
-rw-r--r-- kernel/locking/rwsem-xadd.c | 16
-rw-r--r-- kernel/locking/rwsem.c | 2
-rw-r--r-- kernel/power/process.c | 1
-rw-r--r-- kernel/power/suspend.c | 4
-rw-r--r-- kernel/rcu/rcutorture.c | 4
-rw-r--r-- kernel/rcu/tree.c | 140
-rw-r--r-- kernel/rcu/tree.h | 6
-rw-r--r-- kernel/rcu/tree_plugin.h | 2
-rw-r--r-- kernel/rcu/update.c | 22
-rw-r--r-- kernel/sched/core.c | 7
-rw-r--r-- kernel/sched/debug.c | 2
-rw-r--r-- kernel/time/alarmtimer.c | 20
-rw-r--r-- kernel/time/clockevents.c | 10
-rw-r--r-- kernel/time/sched_clock.c | 4
-rw-r--r-- kernel/trace/ftrace.c | 4
-rw-r--r-- kernel/trace/ring_buffer.c | 4
-rw-r--r-- kernel/trace/trace.c | 20
-rw-r--r-- kernel/trace/trace_clock.c | 9
-rw-r--r-- kernel/trace/trace_events.c | 1
-rw-r--r-- kernel/workqueue.c | 3
-rw-r--r-- lib/cpumask.c | 2
-rw-r--r-- mm/filemap.c | 13
-rw-r--r-- mm/hugetlb.c | 3
-rw-r--r-- mm/memcontrol.c | 4
-rw-r--r-- mm/memory-failure.c | 18
-rw-r--r-- mm/memory.c | 24
-rw-r--r-- mm/mempolicy.c | 2
-rw-r--r-- mm/migrate.c | 5
-rw-r--r-- mm/page-writeback.c | 6
-rw-r--r-- mm/page_alloc.c | 31
-rw-r--r-- mm/rmap.c | 10
-rw-r--r-- mm/shmem.c | 102
-rw-r--r-- mm/slab_common.c | 2
-rw-r--r-- mm/truncate.c | 11
-rw-r--r-- net/8021q/vlan_dev.c | 13
-rw-r--r-- net/appletalk/ddp.c | 3
-rw-r--r-- net/batman-adv/bridge_loop_avoidance.c | 44
-rw-r--r-- net/batman-adv/soft-interface.c | 60
-rw-r--r-- net/batman-adv/translation-table.c | 26
-rw-r--r-- net/batman-adv/types.h | 2
-rw-r--r-- net/bluetooth/hci_conn.c | 12
-rw-r--r-- net/bluetooth/smp.c | 60
-rw-r--r-- net/compat.c | 9
-rw-r--r-- net/core/dev.c | 32
-rw-r--r-- net/core/iovec.c | 6
-rw-r--r-- net/core/neighbour.c | 11
-rw-r--r-- net/dns_resolver/dns_query.c | 2
-rw-r--r-- net/ipv4/af_inet.c | 3
-rw-r--r-- net/ipv4/gre_demux.c | 1
-rw-r--r-- net/ipv4/gre_offload.c | 3
-rw-r--r-- net/ipv4/icmp.c | 2
-rw-r--r-- net/ipv4/igmp.c | 10
-rw-r--r-- net/ipv4/ip_options.c | 4
-rw-r--r-- net/ipv4/ip_tunnel.c | 12
-rw-r--r-- net/ipv4/route.c | 47
-rw-r--r-- net/ipv4/tcp.c | 3
-rw-r--r-- net/ipv4/tcp_input.c | 8
-rw-r--r-- net/ipv4/tcp_offload.c | 2
-rw-r--r-- net/ipv4/tcp_output.c | 6
-rw-r--r-- net/ipv4/udp.c | 5
-rw-r--r-- net/ipv6/ip6_output.c | 2
-rw-r--r-- net/ipv6/mcast.c | 13
-rw-r--r-- net/ipv6/tcpv6_offload.c | 2
-rw-r--r-- net/ipv6/udp.c | 6
-rw-r--r-- net/l2tp/l2tp_ppp.c | 4
-rw-r--r-- net/mac80211/cfg.c | 5
-rw-r--r-- net/mac80211/tx.c | 20
-rw-r--r-- net/mac80211/util.c | 5
-rw-r--r-- net/netfilter/ipvs/ip_vs_conn.c | 1
-rw-r--r-- net/netfilter/nf_tables_api.c | 140
-rw-r--r-- net/netfilter/nf_tables_core.c | 10
-rw-r--r-- net/netlink/af_netlink.c | 4
-rw-r--r-- net/openvswitch/actions.c | 2
-rw-r--r-- net/openvswitch/datapath.c | 27
-rw-r--r-- net/openvswitch/flow.c | 4
-rw-r--r-- net/openvswitch/flow.h | 5
-rw-r--r-- net/openvswitch/flow_table.c | 16
-rw-r--r-- net/openvswitch/flow_table.h | 3
-rw-r--r-- net/openvswitch/vport-gre.c | 17
-rw-r--r-- net/sched/cls_u32.c | 19
-rw-r--r-- net/sctp/associola.c | 1
-rw-r--r-- net/sctp/ulpevent.c | 122
-rw-r--r-- net/tipc/bcast.c | 1
-rw-r--r-- net/tipc/msg.c | 11
-rw-r--r-- net/wireless/core.h | 2
-rw-r--r-- net/wireless/nl80211.c | 11
-rw-r--r-- net/wireless/reg.c | 22
-rw-r--r-- net/wireless/trace.h | 3
-rw-r--r-- net/xfrm/xfrm_policy.c | 2
-rw-r--r-- net/xfrm/xfrm_user.c | 7
-rwxr-xr-x scripts/kernel-doc | 15
-rw-r--r-- sound/firewire/bebob/bebob_maudio.c | 53
-rw-r--r-- sound/pci/hda/hda_controller.c | 3
-rw-r--r-- sound/pci/hda/hda_intel.c | 12
-rw-r--r-- sound/pci/hda/hda_priv.h | 1
-rw-r--r-- sound/pci/hda/hda_tegra.c | 2
-rw-r--r-- sound/pci/hda/patch_hdmi.c | 2
-rw-r--r-- sound/soc/fsl/imx-pcm-dma.c | 1
-rw-r--r-- tools/lib/lockdep/include/liblockdep/mutex.h | 4
-rw-r--r-- tools/lib/lockdep/include/liblockdep/rwlock.h | 8
-rw-r--r-- tools/lib/lockdep/preload.c | 20
-rw-r--r-- tools/perf/ui/browsers/hists.c | 21
-rw-r--r-- tools/perf/util/machine.c | 54
-rw-r--r-- tools/thermal/tmon/Makefile | 2
-rw-r--r-- tools/thermal/tmon/tmon.c | 26
-rw-r--r-- virt/kvm/arm/vgic.c | 24
908 files changed, 30886 insertions, 9769 deletions
diff --git a/.mailmap b/.mailmap
index df1baba43a64..1ad68731fb47 100644
--- a/.mailmap
+++ b/.mailmap
@@ -62,6 +62,11 @@ Jeff Garzik <[email protected]>
Jens Axboe <[email protected]>
Jens Osterkamp <[email protected]>
John Stultz <[email protected]>
Juha Yrjola <at solidboot.com>
Juha Yrjola <[email protected]>
Juha Yrjola <[email protected]>
diff --git a/CREDITS b/CREDITS
index 28ee1514b9de..a80b66718f66 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3511,10 +3511,11 @@ S: MacGregor A.C.T 2615
S: Australia
N: Josh Triplett
-P: 1024D/D0FE7AFB B24A 65C9 1D71 2AC2 DE87 CA26 189B 9946 D0FE 7AFB
-D: rcutorture maintainer
+P: 4096R/8AFF873D 758E 5042 E397 4BA3 3A9C 1E67 0ED9 A3DF 8AFF 873D
+D: RCU and rcutorture
D: lock annotations, finding and fixing lock bugs
+D: kernel tinification
N: Winfried Trümper
diff --git a/Documentation/Changes b/Documentation/Changes
index 2254db0f00a5..227bec88021e 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -280,12 +280,9 @@ that is possible.
mcelog
------
-In Linux 2.6.31+ the i386 kernel needs to run the mcelog utility
-as a regular cronjob similar to the x86-64 kernel to process and log
-machine check events when CONFIG_X86_NEW_MCE is enabled. Machine check
-events are errors reported by the CPU. Processing them is strongly encouraged.
-All x86-64 kernels since 2.6.4 require the mcelog utility to
-process machine checks.
+On x86 kernels the mcelog utility is needed to process and log machine check
+events when CONFIG_X86_MCE is enabled. Machine check events are errors reported
+by the CPU. Processing them is strongly encouraged.
Getting updated software
========================
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index b314a42bb18c..1d3756d3176c 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -2338,6 +2338,12 @@ void intel_crt_init(struct drm_device *dev)
!Edrivers/gpu/drm/drm_dp_helper.c
</sect2>
<sect2>
+ <title>Display Port MST Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper
+!Iinclude/drm/drm_dp_mst_helper.h
+!Edrivers/gpu/drm/drm_dp_mst_topology.c
+ </sect2>
+ <sect2>
<title>EDID Helper Functions Reference</title>
!Edrivers/gpu/drm/drm_edid.c
</sect2>
@@ -2502,7 +2508,7 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >Description/Restrictions</td>
</tr>
<tr>
- <td rowspan="20" valign="top" >DRM</td>
+ <td rowspan="21" valign="top" >DRM</td>
<td rowspan="2" valign="top" >Generic</td>
<td valign="top" >“EDID”</td>
<td valign="top" >BLOB | IMMUTABLE</td>
@@ -2633,7 +2639,7 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
- <td rowspan="2" valign="top" >Optional</td>
+ <td rowspan="3" valign="top" >Optional</td>
<td valign="top" >“scaling mode”</td>
<td valign="top" >ENUM</td>
<td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
@@ -2641,6 +2647,15 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
+ <td valign="top" >"aspect ratio"</td>
+ <td valign="top" >ENUM</td>
+ <td valign="top" >{ "None", "4:3", "16:9" }</td>
+ <td valign="top" >Connector</td>
+ <td valign="top" >DRM property to set aspect ratio from user space app.
+ This enum is made generic to allow addition of custom aspect
+ ratios.</td>
+ </tr>
+ <tr>
<td valign="top" >“dirty”</td>
<td valign="top" >ENUM | IMMUTABLE</td>
<td valign="top" >{ "Off", "On", "Annotate" }</td>
@@ -2649,7 +2664,7 @@ void intel_crt_init(struct drm_device *dev)
</tr>
<tr>
<td rowspan="21" valign="top" >i915</td>
- <td rowspan="3" valign="top" >Generic</td>
+ <td rowspan="2" valign="top" >Generic</td>
<td valign="top" >"Broadcast RGB"</td>
<td valign="top" >ENUM</td>
<td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
@@ -2664,10 +2679,11 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
- <td valign="top" >Standard name as in DRM</td>
- <td valign="top" >Standard type as in DRM</td>
- <td valign="top" >Standard value as in DRM</td>
- <td valign="top" >Standard Object as in DRM</td>
+ <td rowspan="1" valign="top" >Plane</td>
+ <td valign="top" >“rotation”</td>
+ <td valign="top" >BITMASK</td>
+ <td valign="top" >{ 0, "rotate-0" }, { 2, "rotate-180" }</td>
+ <td valign="top" >Plane</td>
<td valign="top" >TBD</td>
</tr>
<tr>
@@ -2799,8 +2815,8 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
- <td rowspan="3" valign="top" >CDV gma-500</td>
- <td rowspan="3" valign="top" >Generic</td>
+ <td rowspan="2" valign="top" >CDV gma-500</td>
+ <td rowspan="2" valign="top" >Generic</td>
<td valign="top" >"Broadcast RGB"</td>
<td valign="top" >ENUM</td>
<td valign="top" >{ “Full”, “Limited 16:235” }</td>
@@ -2815,15 +2831,8 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
- <td valign="top" >Standard name as in DRM</td>
- <td valign="top" >Standard type as in DRM</td>
- <td valign="top" >Standard value as in DRM</td>
- <td valign="top" >Standard Object as in DRM</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="20" valign="top" >Poulsbo</td>
- <td rowspan="2" valign="top" >Generic</td>
+ <td rowspan="19" valign="top" >Poulsbo</td>
+ <td rowspan="1" valign="top" >Generic</td>
<td valign="top" >“backlight”</td>
<td valign="top" >RANGE</td>
<td valign="top" >Min=0, Max=100</td>
@@ -2831,13 +2840,6 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
- <td valign="top" >Standard name as in DRM</td>
- <td valign="top" >Standard type as in DRM</td>
- <td valign="top" >Standard value as in DRM</td>
- <td valign="top" >Standard Object as in DRM</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
<td rowspan="17" valign="top" >SDVO-TV</td>
<td valign="top" >“mode”</td>
<td valign="top" >ENUM</td>
@@ -3064,7 +3066,7 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
- <td rowspan="3" valign="top" >i2c/ch7006_drv</td>
+ <td rowspan="2" valign="top" >i2c/ch7006_drv</td>
<td valign="top" >Generic</td>
<td valign="top" >“scale”</td>
<td valign="top" >RANGE</td>
@@ -3073,14 +3075,7 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
- <td rowspan="2" valign="top" >TV</td>
- <td valign="top" >Standard names as in DRM</td>
- <td valign="top" >Standard types as in DRM</td>
- <td valign="top" >Standard Values as in DRM</td>
- <td valign="top" >Standard object as in DRM</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
+ <td rowspan="1" valign="top" >TV</td>
<td valign="top" >“mode”</td>
<td valign="top" >ENUM</td>
<td valign="top" >{ "PAL", "PAL-M","PAL-N"}, ”PAL-Nc"
@@ -3089,7 +3084,7 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
- <td rowspan="16" valign="top" >nouveau</td>
+ <td rowspan="15" valign="top" >nouveau</td>
<td rowspan="6" valign="top" >NV10 Overlay</td>
<td valign="top" >"colorkey"</td>
<td valign="top" >RANGE</td>
@@ -3198,14 +3193,6 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
- <td valign="top" >Generic</td>
- <td valign="top" >Standard name as in DRM</td>
- <td valign="top" >Standard type as in DRM</td>
- <td valign="top" >Standard value as in DRM</td>
- <td valign="top" >Standard Object as in DRM</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
<td rowspan="2" valign="top" >omap</td>
<td rowspan="2" valign="top" >Generic</td>
<td valign="top" >“rotation”</td>
@@ -3236,7 +3223,7 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
- <td rowspan="10" valign="top" >radeon</td>
+ <td rowspan="9" valign="top" >radeon</td>
<td valign="top" >DVI-I</td>
<td valign="top" >“coherent”</td>
<td valign="top" >RANGE</td>
@@ -3308,14 +3295,6 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
- <td valign="top" >Generic</td>
- <td valign="top" >Standard name as in DRM</td>
- <td valign="top" >Standard type as in DRM</td>
- <td valign="top" >Standard value as in DRM</td>
- <td valign="top" >Standard Object as in DRM</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
<td rowspan="3" valign="top" >rcar-du</td>
<td rowspan="3" valign="top" >Generic</td>
<td valign="top" >"alpha"</td>
diff --git a/Documentation/DocBook/gadget.tmpl b/Documentation/DocBook/gadget.tmpl
index 4017f147ba2f..2c425d70f7e2 100644
--- a/Documentation/DocBook/gadget.tmpl
+++ b/Documentation/DocBook/gadget.tmpl
@@ -708,7 +708,7 @@ hardware level details could be very different.
<para>Systems need specialized hardware support to implement OTG,
notably including a special <emphasis>Mini-AB</emphasis> jack
-and associated transciever to support <emphasis>Dual-Role</emphasis>
+and associated transceiver to support <emphasis>Dual-Role</emphasis>
operation:
they can act either as a host, using the standard
Linux-USB host side driver stack,
diff --git a/Documentation/DocBook/genericirq.tmpl b/Documentation/DocBook/genericirq.tmpl
index 46347f603353..59fb5c077541 100644
--- a/Documentation/DocBook/genericirq.tmpl
+++ b/Documentation/DocBook/genericirq.tmpl
@@ -182,7 +182,7 @@
<para>
Each interrupt is described by an interrupt descriptor structure
irq_desc. The interrupt is referenced by an 'unsigned int' numeric
- value which selects the corresponding interrupt decription structure
+ value which selects the corresponding interrupt description structure
in the descriptor structures array.
The descriptor structure contains status information and pointers
to the interrupt flow method and the interrupt chip structure
@@ -470,7 +470,7 @@ if (desc->irq_data.chip->irq_eoi)
<para>
To avoid copies of identical implementations of IRQ chips the
core provides a configurable generic interrupt chip
- implementation. Developers should check carefuly whether the
+ implementation. Developers should check carefully whether the
generic chip fits their needs before implementing the same
functionality slightly differently themselves.
</para>
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 19f2a5a5a5b4..e584ee12a1e7 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1760,7 +1760,7 @@ as it would be on UP.
</para>
<para>
-There is a furthur optimization possible here: remember our original
+There is a further optimization possible here: remember our original
cache code, where there were no reference counts and the caller simply
held the lock whenever using the object? This is still possible: if
you hold the lock, no one can delete the object, so you don't need to
diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl
index deb71baed328..d7fcdc5a4379 100644
--- a/Documentation/DocBook/libata.tmpl
+++ b/Documentation/DocBook/libata.tmpl
@@ -677,7 +677,7 @@ and other resources, etc.
<listitem>
<para>
- ATA_QCFLAG_ACTIVE is clared from qc->flags.
+ ATA_QCFLAG_ACTIVE is cleared from qc->flags.
</para>
</listitem>
@@ -708,7 +708,7 @@ and other resources, etc.
<listitem>
<para>
- qc->waiting is claread &amp; completed (in that order).
+ qc->waiting is cleared &amp; completed (in that order).
</para>
</listitem>
@@ -1163,7 +1163,7 @@ and other resources, etc.
<para>
Once sense data is acquired, this type of errors can be
- handled similary to other SCSI errors. Note that sense data
+ handled similarly to other SCSI errors. Note that sense data
may indicate ATA bus error (e.g. Sense Key 04h HARDWARE ERROR
&amp;&amp; ASC/ASCQ 47h/00h SCSI PARITY ERROR). In such
cases, the error should be considered as an ATA bus error and
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
index 4decb46bfa76..03f9a1f8d413 100644
--- a/Documentation/DocBook/media_api.tmpl
+++ b/Documentation/DocBook/media_api.tmpl
@@ -68,7 +68,7 @@
several digital tv standards. While it is called as DVB API,
in fact it covers several different video standards including
DVB-T, DVB-S, DVB-C and ATSC. The API is currently being updated
- to documment support also for DVB-S2, ISDB-T and ISDB-S.</para>
+ to document support also for DVB-S2, ISDB-T and ISDB-S.</para>
<para>The third part covers the Remote Controller API.</para>
<para>The fourth part covers the Media Controller API.</para>
<para>For additional information and for the latest development code,
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index cd11926e07c7..7da8f0402af5 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -91,7 +91,7 @@
<listitem><para>
[MTD Interface]</para><para>
These functions provide the interface to the MTD kernel API.
- They are not replacable and provide functionality
+ They are not replaceable and provide functionality
which is complete hardware independent.
</para></listitem>
<listitem><para>
@@ -100,14 +100,14 @@
</para></listitem>
<listitem><para>
[GENERIC]</para><para>
- Generic functions are not replacable and provide functionality
+ Generic functions are not replaceable and provide functionality
which is complete hardware independent.
</para></listitem>
<listitem><para>
[DEFAULT]</para><para>
Default functions provide hardware related functionality which is suitable
for most of the implementations. These functions can be replaced by the
- board driver if neccecary. Those functions are called via pointers in the
+ board driver if necessary. Those functions are called via pointers in the
NAND chip description structure. The board driver can set the functions which
should be replaced by board dependent functions before calling nand_scan().
If the function pointer is NULL on entry to nand_scan() then the pointer
@@ -264,7 +264,7 @@ static void board_hwcontrol(struct mtd_info *mtd, int cmd)
is set up nand_scan() is called. This function tries to
detect and identify then chip. If a chip is found all the
internal data fields are initialized accordingly.
- The structure(s) have to be zeroed out first and then filled with the neccecary
+ The structure(s) have to be zeroed out first and then filled with the necessary
information about the device.
</para>
<programlisting>
@@ -327,7 +327,7 @@ module_init(board_init);
<sect1 id="Exit_function">
<title>Exit function</title>
<para>
- The exit function is only neccecary if the driver is
+ The exit function is only necessary if the driver is
compiled as a module. It releases all resources which
are held by the chip driver and unregisters the partitions
in the MTD layer.
@@ -494,7 +494,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
in this case. See rts_from4.c and diskonchip.c for
implementation reference. In those cases we must also
use bad block tables on FLASH, because the ECC layout is
- interferring with the bad block marker positions.
+ interfering with the bad block marker positions.
See bad block table support for details.
</para>
</sect2>
@@ -542,7 +542,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
<para>
nand_scan() calls the function nand_default_bbt().
nand_default_bbt() selects appropriate default
- bad block table desriptors depending on the chip information
+ bad block table descriptors depending on the chip information
which was retrieved by nand_scan().
</para>
<para>
@@ -554,7 +554,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
<sect2 id="Flash_based_tables">
<title>Flash based tables</title>
<para>
- It may be desired or neccecary to keep a bad block table in FLASH.
+ It may be desired or necessary to keep a bad block table in FLASH.
For AG-AND chips this is mandatory, as they have no factory marked
bad blocks. They have factory marked good blocks. The marker pattern
is erased when the block is erased to be reused. So in case of
@@ -565,10 +565,10 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
of the blocks.
</para>
<para>
- The blocks in which the tables are stored are procteted against
+ The blocks in which the tables are stored are protected against
accidental access by marking them bad in the memory bad block
table. The bad block table management functions are allowed
- to circumvernt this protection.
+ to circumvent this protection.
</para>
<para>
The simplest way to activate the FLASH based bad block table support
@@ -592,7 +592,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
User defined tables are created by filling out a
nand_bbt_descr structure and storing the pointer in the
nand_chip structure member bbt_td before calling nand_scan().
- If a mirror table is neccecary a second structure must be
+ If a mirror table is necessary a second structure must be
created and a pointer to this structure must be stored
in bbt_md inside the nand_chip structure. If the bbt_md
member is set to NULL then only the main table is used
@@ -666,7 +666,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
<para>
For automatic placement some blocks must be reserved for
bad block table storage. The number of reserved blocks is defined
- in the maxblocks member of the babd block table description structure.
+ in the maxblocks member of the bad block table description structure.
Reserving 4 blocks for mirrored tables should be a reasonable number.
This also limits the number of blocks which are scanned for the bad
block table ident pattern.
@@ -1068,11 +1068,11 @@ in this page</entry>
<chapter id="filesystems">
<title>Filesystem support</title>
<para>
- The NAND driver provides all neccecary functions for a
+ The NAND driver provides all necessary functions for a
filesystem via the MTD interface.
</para>
<para>
- Filesystems must be aware of the NAND pecularities and
+ Filesystems must be aware of the NAND peculiarities and
restrictions. One major restrictions of NAND Flash is, that you cannot
write as often as you want to a page. The consecutive writes to a page,
before erasing it again, are restricted to 1-3 writes, depending on the
@@ -1222,7 +1222,7 @@ in this page</entry>
#define NAND_BBT_VERSION 0x00000100
/* Create a bbt if none exists */
#define NAND_BBT_CREATE 0x00000200
-/* Write bbt if neccecary */
+/* Write bbt if necessary */
#define NAND_BBT_WRITE 0x00001000
/* Read and write back block contents when writing bbt */
#define NAND_BBT_SAVECONTENT 0x00002000
diff --git a/Documentation/DocBook/regulator.tmpl b/Documentation/DocBook/regulator.tmpl
index 346e552fa2cc..3b08a085d2c7 100644
--- a/Documentation/DocBook/regulator.tmpl
+++ b/Documentation/DocBook/regulator.tmpl
@@ -155,7 +155,7 @@
release regulators. Functions are
provided to <link linkend='API-regulator-enable'>enable</link>
and <link linkend='API-regulator-disable'>disable</link> the
- reguator and to get and set the runtime parameters of the
+ regulator and to get and set the runtime parameters of the
regulator.
</para>
<para>
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index 95618159e29b..bbe9c1fd5cef 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -766,10 +766,10 @@ framework to set up sysfs files for this region. Simply leave it alone.
<para>
The dynamic memory regions will be allocated when the UIO device file,
<varname>/dev/uioX</varname> is opened.
- Simiar to static memory resources, the memory region information for
+ Similar to static memory resources, the memory region information for
dynamic regions is then visible via sysfs at
<varname>/sys/class/uio/uioX/maps/mapY/*</varname>.
- The dynmaic memory regions will be freed when the UIO device file is
+ The dynamic memory regions will be freed when the UIO device file is
closed. When no processes are holding the device file open, the address
returned to userspace is ~0.
</para>
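
For reference, a minimal user-space sketch of the lifecycle just described;
uio0 and map0 are placeholder names, and map N is selected by passing N
times the page size as the mmap offset:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned long size;
            FILE *f;
            void *p;
            int fd = open("/dev/uio0", O_RDWR); /* dynamic regions are allocated here */

            if (fd < 0)
                    return 1;

            /* region sizes appear in sysfs once the device file is open */
            f = fopen("/sys/class/uio/uio0/maps/map0/size", "r");
            if (!f || fscanf(f, "%lx", &size) != 1)
                    return 1;
            fclose(f);

            /* map N is selected via offset N * page size */
            p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                     fd, 0 * getpagesize());
            if (p == MAP_FAILED)
                    return 1;

            /* ... access the region ... */

            munmap(p, size);
            close(fd);  /* dynamic regions are freed again here */
            return 0;
    }
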
diff --git a/Documentation/DocBook/usb.tmpl b/Documentation/DocBook/usb.tmpl
index 8d57c1888dca..85fc0e28576f 100644
--- a/Documentation/DocBook/usb.tmpl
+++ b/Documentation/DocBook/usb.tmpl
@@ -153,7 +153,7 @@
<listitem><para>The Linux USB API supports synchronous calls for
control and bulk messages.
- It also supports asynchnous calls for all kinds of data transfer,
+ It also supports asynchronous calls for all kinds of data transfer,
using request structures called "URBs" (USB Request Blocks).
</para></listitem>
diff --git a/Documentation/DocBook/writing-an-alsa-driver.tmpl b/Documentation/DocBook/writing-an-alsa-driver.tmpl
index d0056a4e9c53..6f639d9530b5 100644
--- a/Documentation/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/DocBook/writing-an-alsa-driver.tmpl
@@ -5696,7 +5696,7 @@ struct _snd_pcm_runtime {
suspending the PCM operations via
<function>snd_pcm_suspend_all()</function> or
<function>snd_pcm_suspend()</function>. It means that the PCM
- streams are already stoppped when the register snapshot is
+ streams are already stopped when the register snapshot is
taken. But, remember that you don't have to restart the PCM
stream in the resume callback. It'll be restarted via
trigger call with <constant>SNDRV_PCM_TRIGGER_RESUME</constant>
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index fd786ea13a1f..e182be5e3c83 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -60,12 +60,6 @@ If the driver needs to perform more complex initialization like getting and
configuring GPIOs it can get its ACPI handle and extract this information
from ACPI tables.
-Currently the kernel is not able to automatically determine from which ACPI
-device it should make the corresponding platform device so we need to add
-the ACPI device explicitly to acpi_platform_device_ids list defined in
-drivers/acpi/acpi_platform.c. This limitation is only for the platform
-devices, SPI and I2C devices are created automatically as described below.
-
DMA support
~~~~~~~~~~~
DMA controllers enumerated via ACPI should be registered in the system to
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt
index e742d21dbd96..a69ffe1d54d5 100644
--- a/Documentation/cpu-freq/intel-pstate.txt
+++ b/Documentation/cpu-freq/intel-pstate.txt
@@ -15,10 +15,13 @@ New sysfs files for controlling P state selection have been added to
/sys/devices/system/cpu/intel_pstate/
max_perf_pct: limits the maximum P state that will be requested by
- the driver stated as a percentage of the available performance.
+	the driver, stated as a percentage of the available performance. The
+	available (P state) performance may be reduced by the no_turbo
+	setting described below.
min_perf_pct: limits the minimum P state that will be requested by
- the driver stated as a percentage of the available performance.
+	the driver, stated as a percentage of the max (non-turbo)
+	performance level.
no_turbo: limits the driver to selecting P states below the turbo
frequency range.
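
For instance, a tiny C sketch that caps requested P states at 80% of the
available performance via the sysfs file described above (an equivalent
shell echo works just as well; error handling trimmed):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/devices/system/cpu/intel_pstate/max_perf_pct", "w");

            if (!f)
                    return 1;
            fprintf(f, "80\n"); /* request at most 80% of available performance */
            return fclose(f) ? 1 : 0;
    }
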
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
index 5216b419016a..8b4f7b7fe88b 100644
--- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
+++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
@@ -9,6 +9,18 @@ Required Properties:
- reg: physical base address of the controller and length of memory mapped
region.
+Optional Properties:
+- clocks: List of clock handles. The parent clocks of the input clocks to the
+	devices in this power domain are set to oscclk before power gating
+	and restored after the domain is powered on again. This is required
+	for all domains which are powered on and off, and not required for
+	unused domains.
+- clock-names: The following clocks can be specified:
+	- oscclk: Oscillator clock.
+	- pclkN, clkN: Pairs of the parent of an input clock and the input
+		clock to the devices in this power domain. A maximum of 4
+		pairs (N = 0 to 3) is supported currently.
+
Node of a device using power domains must have a samsung,power-domain property
defined with a phandle to respective power domain.
@@ -19,6 +31,14 @@ Example:
reg = <0x10023C00 0x10>;
};
+ mfc_pd: power-domain@10044060 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10044060 0x20>;
+ clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>,
+ <&clock CLK_MOUT_USER_ACLK333>;
+ clock-names = "oscclk", "pclk0", "clk0";
+ };
+
Example of the node using power domain:
node {
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
index f055515d2b62..366690cb86a3 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
@@ -8,10 +8,12 @@ Both required and optional properties listed below must be defined
under node /cpus/cpu@0.
Required properties:
-- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
- for details
+- None
Optional properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt for
+ details. OPPs *must* be supplied either via DT, i.e. this property, or
+ populated at runtime.
- clock-latency: Specify the possible maximum transition latency for clock,
in unit of nanoseconds.
- voltage-tolerance: Specify the CPU voltage tolerance in percentage.
diff --git a/Documentation/devicetree/bindings/drm/armada/marvell,dove-lcd.txt b/Documentation/devicetree/bindings/drm/armada/marvell,dove-lcd.txt
new file mode 100644
index 000000000000..46525ea3e646
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/armada/marvell,dove-lcd.txt
@@ -0,0 +1,30 @@
+Device Tree bindings for Armada DRM CRTC driver
+
+Required properties:
+ - compatible: value should be "marvell,dove-lcd".
+ - reg: base address and size of the LCD controller
+ - interrupts: single interrupt number for the LCD controller
+ - port: video output port with endpoints, as described by graph.txt
+
+Optional properties:
+
+ - clocks: as described by clock-bindings.txt
+ - clock-names: as described by clock-bindings.txt
+ "axiclk" - axi bus clock for pixel clock
+ "plldivider" - pll divider clock for pixel clock
+ "ext_ref_clk0" - external clock 0 for pixel clock
+ "ext_ref_clk1" - external clock 1 for pixel clock
+
+Note: all clocks are optional but at least one must be specified.
+Further clocks may be added in the future according to requirements of
+different SoCs.
+
+Example:
+
+ lcd0: lcd-controller@820000 {
+ compatible = "marvell,dove-lcd";
+ reg = <0x820000 0x1000>;
+ interrupts = <47>;
+ clocks = <&si5351 0>;
+		clock-names = "ext_ref_clk1";
+ };
diff --git a/Documentation/devicetree/bindings/drm/i2c/tda998x.txt b/Documentation/devicetree/bindings/drm/i2c/tda998x.txt
index d7df01c5bb3a..e9e4bce40760 100644
--- a/Documentation/devicetree/bindings/drm/i2c/tda998x.txt
+++ b/Documentation/devicetree/bindings/drm/i2c/tda998x.txt
@@ -3,6 +3,8 @@ Device-Tree bindings for the NXP TDA998x HDMI transmitter
Required properties;
- compatible: must be "nxp,tda998x"
+ - reg: I2C address
+
Optional properties:
- interrupts: interrupt number and trigger type
default: polling
diff --git a/Documentation/devicetree/bindings/drm/msm/gpu.txt b/Documentation/devicetree/bindings/drm/msm/gpu.txt
new file mode 100644
index 000000000000..67d0a58dbb77
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/msm/gpu.txt
@@ -0,0 +1,52 @@
+Qualcomm adreno/snapdragon GPU
+
+Required properties:
+- compatible: "qcom,adreno-3xx"
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The interrupt signal from the gpu.
+- clocks: device clocks
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: the following clocks are required:
+ * "core_clk"
+ * "iface_clk"
+ * "mem_iface_clk"
+- qcom,chipid: gpu chip-id. Note this may become optional for future
+ devices if we can reliably read the chipid from hw
+- qcom,gpu-pwrlevels: list of operating points
+ - compatible: "qcom,gpu-pwrlevels"
+ - for each qcom,gpu-pwrlevel:
+ - qcom,gpu-freq: requested gpu clock speed
+ - NOTE: downstream android driver defines additional parameters to
+ configure memory bandwidth scaling per OPP.
+
+Example:
+
+/ {
+ ...
+
+ gpu: qcom,kgsl-3d0@4300000 {
+ compatible = "qcom,adreno-3xx";
+ reg = <0x04300000 0x20000>;
+ reg-names = "kgsl_3d0_reg_memory";
+ interrupts = <GIC_SPI 80 0>;
+ interrupt-names = "kgsl_3d0_irq";
+ clock-names =
+ "core_clk",
+ "iface_clk",
+ "mem_iface_clk";
+ clocks =
+ <&mmcc GFX3D_CLK>,
+ <&mmcc GFX3D_AHB_CLK>,
+ <&mmcc MMSS_IMEM_AHB_CLK>;
+ qcom,chipid = <0x03020100>;
+ qcom,gpu-pwrlevels {
+ compatible = "qcom,gpu-pwrlevels";
+ qcom,gpu-pwrlevel@0 {
+ qcom,gpu-freq = <450000000>;
+ };
+ qcom,gpu-pwrlevel@1 {
+ qcom,gpu-freq = <27000000>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/drm/msm/hdmi.txt b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
new file mode 100644
index 000000000000..aca917fe2ba7
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
@@ -0,0 +1,46 @@
+Qualcomm adreno/snapdragon hdmi output
+
+Required properties:
+- compatible: one of the following
+ * "qcom,hdmi-tx-8660"
+ * "qcom,hdmi-tx-8960"
+- reg: Physical base address and length of the controller's registers
+- reg-names: "core_physical"
+- interrupts: The interrupt signal from the hdmi block.
+- clocks: device clocks
+ See ../clocks/clock-bindings.txt for details.
+- qcom,hdmi-tx-ddc-clk-gpio: ddc clk pin
+- qcom,hdmi-tx-ddc-data-gpio: ddc data pin
+- qcom,hdmi-tx-hpd-gpio: hpd pin
+- core-vdda-supply: phandle to supply regulator
+- hdmi-mux-supply: phandle to mux regulator
+
+Optional properties:
+- qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin
+- qcom,hdmi-tx-mux-sel-gpio: hdmi mux select pin
+
+Example:
+
+/ {
+ ...
+
+ hdmi: qcom,hdmi-tx-8960@4a00000 {
+ compatible = "qcom,hdmi-tx-8960";
+ reg-names = "core_physical";
+ reg = <0x04a00000 0x1000>;
+ interrupts = <GIC_SPI 79 0>;
+ clock-names =
+ "core_clk",
+ "master_iface_clk",
+ "slave_iface_clk";
+ clocks =
+ <&mmcc HDMI_APP_CLK>,
+ <&mmcc HDMI_M_AHB_CLK>,
+ <&mmcc HDMI_S_AHB_CLK>;
+		qcom,hdmi-tx-ddc-clk-gpio = <&msmgpio 70 GPIO_ACTIVE_HIGH>;
+		qcom,hdmi-tx-ddc-data-gpio = <&msmgpio 71 GPIO_ACTIVE_HIGH>;
+		qcom,hdmi-tx-hpd-gpio = <&msmgpio 72 GPIO_ACTIVE_HIGH>;
+ core-vdda-supply = <&pm8921_hdmi_mvs>;
+ hdmi-mux-supply = <&ext_3p3v>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/drm/msm/mdp.txt b/Documentation/devicetree/bindings/drm/msm/mdp.txt
new file mode 100644
index 000000000000..1a0598e5279d
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/msm/mdp.txt
@@ -0,0 +1,48 @@
+Qualcomm adreno/snapdragon display controller
+
+Required properties:
+- compatible:
+ * "qcom,mdp" - mdp4
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The interrupt signal from the display controller.
+- connectors: array of phandles for output device(s)
+- clocks: device clocks
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: the following clocks are required:
+ * "core_clk"
+ * "iface_clk"
+ * "lut_clk"
+ * "src_clk"
+ * "hdmi_clk"
+  * "mdp_clk"
+
+Optional properties:
+- gpus: phandle for gpu device
+
+Example:
+
+/ {
+ ...
+
+ mdp: qcom,mdp@5100000 {
+ compatible = "qcom,mdp";
+ reg = <0x05100000 0xf0000>;
+ interrupts = <GIC_SPI 75 0>;
+ connectors = <&hdmi>;
+ gpus = <&gpu>;
+ clock-names =
+ "core_clk",
+ "iface_clk",
+ "lut_clk",
+ "src_clk",
+ "hdmi_clk",
+ "mdp_clk";
+ clocks =
+ <&mmcc MDP_SRC>,
+ <&mmcc MDP_AHB_CLK>,
+ <&mmcc MDP_LUT_CLK>,
+ <&mmcc TV_SRC>,
+ <&mmcc HDMI_TV_CLK>,
+ <&mmcc MDP_TV_CLK>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
new file mode 100644
index 000000000000..2d150c311a05
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
@@ -0,0 +1,189 @@
+STMicroelectronics stih4xx platforms
+
+- sti-vtg: video timing generator
+ Required properties:
+ - compatible: "st,vtg"
+ - reg: Physical base address of the IP registers and length of memory mapped region.
+ Optional properties:
+  - interrupts: VTG interrupt number to the CPU.
+ - st,slave: phandle on a slave vtg
+
+- sti-vtac: video timing advanced inter-die communication RX and TX
+ Required properties:
+ - compatible: "st,vtac-main" or "st,vtac-aux"
+ - reg: Physical base address of the IP registers and length of memory mapped region.
+  - clocks: from common clock binding: handles to the clocks needed by the
+    hardware IP; the number of clocks may depend on the SoC type.
+ See ../clocks/clock-bindings.txt for details.
+ - clock-names: names of the clocks listed in clocks property in the same
+ order.
+
+- sti-display-subsystem: Master device for DRM sub-components
+  This device must be the parent of all the sub-components and is responsible
+  for binding them.
+ Required properties:
+ - compatible: "st,sti-display-subsystem"
+ - ranges: to allow probing of subdevices
+
+- sti-compositor: frame compositor engine
+ must be a child of sti-display-subsystem
+ Required properties:
+ - compatible: "st,stih<chip>-compositor"
+ - reg: Physical base address of the IP registers and length of memory mapped region.
+  - clocks: from common clock binding: handles to the clocks needed by the
+    hardware IP; the number of clocks may depend on the SoC type.
+ See ../clocks/clock-bindings.txt for details.
+ - clock-names: names of the clocks listed in clocks property in the same
+ order.
+ - resets: resets to be used by the device
+ See ../reset/reset.txt for details.
+ - reset-names: names of the resets listed in resets property in the same
+ order.
+ - st,vtg: phandle(s) on vtg device (main and aux) nodes.
+
+- sti-tvout: video out hardware block
+ must be a child of sti-display-subsystem
+ Required properties:
+ - compatible: "st,stih<chip>-tvout"
+ - reg: Physical base address of the IP registers and length of memory mapped region.
+ - reg-names: names of the mapped memory regions listed in regs property in
+ the same order.
+ - resets: resets to be used by the device
+ See ../reset/reset.txt for details.
+ - reset-names: names of the resets listed in resets property in the same
+ order.
+ - ranges: to allow probing of subdevices
+
+- sti-hdmi: hdmi output block
+ must be a child of sti-tvout
+ Required properties:
+ - compatible: "st,stih<chip>-hdmi";
+ - reg: Physical base address of the IP registers and length of memory mapped region.
+ - reg-names: names of the mapped memory regions listed in regs property in
+ the same order.
+  - interrupts: HDMI interrupt number to the CPU.
+  - interrupt-names: names of the interrupts listed in interrupts property in
+    the same order.
+  - clocks: from common clock binding: handles to the clocks needed by the
+    hardware IP; the number of clocks may depend on the SoC type.
+ - clock-names: names of the clocks listed in clocks property in the same
+ order.
+  - hdmi,hpd-gpio: gpio used to detect whether an hdmi cable is plugged in.
+
+- sti-hda:
+  must be a child of sti-tvout
+  Required properties:
+ - compatible: "st,stih<chip>-hda"
+ - reg: Physical base address of the IP registers and length of memory mapped region.
+ - reg-names: names of the mapped memory regions listed in regs property in
+ the same order.
+  - clocks: from common clock binding: handles to the clocks needed by the
+    hardware IP; the number of clocks may depend on the SoC type.
+ See ../clocks/clock-bindings.txt for details.
+ - clock-names: names of the clocks listed in clocks property in the same
+ order.
+
+Example:
+
+/ {
+ ...
+
+ vtg_main_slave: sti-vtg-main-slave@fe85A800 {
+ compatible = "st,vtg";
+ reg = <0xfe85A800 0x300>;
+ interrupts = <GIC_SPI 175 IRQ_TYPE_NONE>;
+ };
+
+ vtg_main: sti-vtg-main-master@fd348000 {
+ compatible = "st,vtg";
+ reg = <0xfd348000 0x400>;
+ st,slave = <&vtg_main_slave>;
+ };
+
+	vtg_aux_slave: sti-vtg-aux-slave@fe858200 {
+ compatible = "st,vtg";
+ reg = <0xfe858200 0x300>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_NONE>;
+ };
+
+ vtg_aux: sti-vtg-aux-master@fd348400 {
+ compatible = "st,vtg";
+ reg = <0xfd348400 0x400>;
+ st,slave = <&vtg_aux_slave>;
+ };
+
+
+ sti-vtac-rx-main@fee82800 {
+ compatible = "st,vtac-main";
+ reg = <0xfee82800 0x200>;
+ clock-names = "vtac";
+ clocks = <&clk_m_a2_div0 CLK_M_VTAC_MAIN_PHY>;
+ };
+
+ sti-vtac-rx-aux@fee82a00 {
+ compatible = "st,vtac-aux";
+ reg = <0xfee82a00 0x200>;
+ clock-names = "vtac";
+ clocks = <&clk_m_a2_div0 CLK_M_VTAC_AUX_PHY>;
+ };
+
+ sti-vtac-tx-main@fd349000 {
+ compatible = "st,vtac-main";
+ reg = <0xfd349000 0x200>, <0xfd320000 0x10000>;
+ clock-names = "vtac";
+ clocks = <&clk_s_a1_hs CLK_S_VTAC_TX_PHY>;
+ };
+
+ sti-vtac-tx-aux@fd349200 {
+ compatible = "st,vtac-aux";
+ reg = <0xfd349200 0x200>, <0xfd320000 0x10000>;
+ clock-names = "vtac";
+ clocks = <&clk_s_a1_hs CLK_S_VTAC_TX_PHY>;
+ };
+
+ sti-display-subsystem {
+ compatible = "st,sti-display-subsystem";
+ ranges;
+
+ sti-compositor@fd340000 {
+ compatible = "st,stih416-compositor";
+ reg = <0xfd340000 0x1000>;
+ clock-names = "compo_main", "compo_aux",
+ "pix_main", "pix_aux";
+ clocks = <&clk_m_a2_div1 CLK_M_COMPO_MAIN>, <&clk_m_a2_div1 CLK_M_COMPO_AUX>,
+ <&clockgen_c_vcc CLK_S_PIX_MAIN>, <&clockgen_c_vcc CLK_S_PIX_AUX>;
+ reset-names = "compo-main", "compo-aux";
+ resets = <&softreset STIH416_COMPO_M_SOFTRESET>, <&softreset STIH416_COMPO_A_SOFTRESET>;
+ st,vtg = <&vtg_main>, <&vtg_aux>;
+ };
+
+ sti-tvout@fe000000 {
+ compatible = "st,stih416-tvout";
+ reg = <0xfe000000 0x1000>, <0xfe85a000 0x400>, <0xfe830000 0x10000>;
+ reg-names = "tvout-reg", "hda-reg", "syscfg";
+ reset-names = "tvout";
+ resets = <&softreset STIH416_HDTVOUT_SOFTRESET>;
+ ranges;
+
+ sti-hdmi@fe85c000 {
+ compatible = "st,stih416-hdmi";
+ reg = <0xfe85c000 0x1000>, <0xfe830000 0x10000>;
+ reg-names = "hdmi-reg", "syscfg";
+ interrupts = <GIC_SPI 173 IRQ_TYPE_NONE>;
+ interrupt-names = "irq";
+ clock-names = "pix", "tmds", "phy", "audio";
+ clocks = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
+ hdmi,hpd-gpio = <&PIO2 5>;
+ };
+
+ sti-hda@fe85a000 {
+ compatible = "st,stih416-hda";
+ reg = <0xfe85a000 0x400>, <0xfe83085c 0x4>;
+ reg-names = "hda-reg", "video-dacs-ctrl";
+ clock-names = "pix", "hddac";
+ clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
+ };
+ };
+ };
+ ...
+};
diff --git a/Documentation/devicetree/bindings/panel/auo,b133htn01.txt b/Documentation/devicetree/bindings/panel/auo,b133htn01.txt
new file mode 100644
index 000000000000..302226b5bb55
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/auo,b133htn01.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
+
+Required properties:
+- compatible: should be "auo,b133htn01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/foxlink,fl500wvr00-a0t.txt b/Documentation/devicetree/bindings/panel/foxlink,fl500wvr00-a0t.txt
new file mode 100644
index 000000000000..b47f9d87bc19
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/foxlink,fl500wvr00-a0t.txt
@@ -0,0 +1,7 @@
+Foxlink Group 5" WVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "foxlink,fl500wvr00-a0t"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/innolux,n116bge.txt b/Documentation/devicetree/bindings/panel/innolux,n116bge.txt
new file mode 100644
index 000000000000..081bb939ed31
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/innolux,n116bge.txt
@@ -0,0 +1,7 @@
+Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,n116bge"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/innolux,n156bge-l21.txt b/Documentation/devicetree/bindings/panel/innolux,n156bge-l21.txt
new file mode 100644
index 000000000000..7825844aafdf
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/innolux,n156bge-l21.txt
@@ -0,0 +1,7 @@
+InnoLux 15.6" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,n156bge-l21"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index 64fd7dec1bbc..b3556609a06f 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -4,6 +4,13 @@ Required properties:
- compatible: Must contain one of the following:
+ - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.
+ - "renesas,scifb-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFB compatible UART.
+ - "renesas,scifa-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFA compatible UART.
+ - "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
+ - "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
+ - "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART.
+ - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
- "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
- "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
- "renesas,scifa-r8a7790" for R8A7790 (R-Car H2) SCIFA compatible UART.
diff --git a/Documentation/devicetree/bindings/video/exynos_dsim.txt b/Documentation/devicetree/bindings/video/exynos_dsim.txt
index 33b5730d07ba..31036c667d54 100644
--- a/Documentation/devicetree/bindings/video/exynos_dsim.txt
+++ b/Documentation/devicetree/bindings/video/exynos_dsim.txt
@@ -1,7 +1,9 @@
Exynos MIPI DSI Master
Required properties:
- - compatible: "samsung,exynos4210-mipi-dsi"
+ - compatible: value should be one of the following
+ "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
+ "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
- reg: physical base address and length of the registers set for the device
- interrupts: should contain DSI interrupt
- clocks: list of clock specifiers, must contain an entry for each required
diff --git a/Documentation/devicetree/bindings/video/exynos_mixer.txt b/Documentation/devicetree/bindings/video/exynos_mixer.txt
index 7bfde9c9d658..08b394b1edbf 100644
--- a/Documentation/devicetree/bindings/video/exynos_mixer.txt
+++ b/Documentation/devicetree/bindings/video/exynos_mixer.txt
@@ -4,8 +4,9 @@ Required properties:
- compatible: value should be one of the following:
1) "samsung,exynos5-mixer" <DEPRECATED>
2) "samsung,exynos4210-mixer"
- 3) "samsung,exynos5250-mixer"
- 4) "samsung,exynos5420-mixer"
+ 3) "samsung,exynos4212-mixer"
+ 4) "samsung,exynos5250-mixer"
+ 5) "samsung,exynos5420-mixer"
- reg: physical base address of the mixer and length of memory mapped
region.
diff --git a/Documentation/devicetree/bindings/video/samsung-fimd.txt b/Documentation/devicetree/bindings/video/samsung-fimd.txt
index 2dad41b689af..8428fcff8037 100644
--- a/Documentation/devicetree/bindings/video/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/video/samsung-fimd.txt
@@ -44,6 +44,34 @@ Optional Properties:
- display-timings: timing settings for FIMD, as described in document [1].
Can be used in case timings cannot be provided otherwise
or to override timings provided by the panel.
+- samsung,sysreg: handle to syscon used to control the system registers
+- i80-if-timings: timing configuration for lcd i80 interface support.
+  - cs-setup: clock cycles for which the address signal is active before
+    chip select is enabled.
+    If not specified, the default value (0) will be used.
+  - wr-setup: clock cycles for which chip select is active before the
+    write signal is enabled.
+    If not specified, the default value (0) will be used.
+  - wr-active: clock cycles for which the write signal is active.
+    If not specified, the default value (1) will be used.
+  - wr-hold: clock cycles for which chip select stays active after the
+    write signal is disabled.
+    If not specified, the default value (0) will be used.
+
+ The parameters are defined as:
+
+	VCLK(internal)	__|¯¯¯¯¯¯|_____|¯¯¯¯¯¯|_____|¯¯¯¯¯¯|_____|¯¯¯¯¯¯|_____|¯¯
+			  :        :            :            :            :
+	Address Output	--:<XXXXXXXXXXX:XXXXXXXXXXXX:XXXXXXXXXXXX:XXXXXXXXXXXX:XX
+			  | cs-setup+1 |        :            :            :
+			  |<---------->|        :            :            :
+	Chip Select	¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|____________:____________:____________|¯¯
+			| wr-setup+1 |          | wr-hold+1 |
+			|<---------->|          |<---------->|
+	Write Enable	¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|____________|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯
+						| wr-active+1|
+						|<---------->|
+	Video Data	----------------------------<XXXXXXXXXXXXXXXXXXXXXXXXX>--
The device node can contain 'port' child nodes according to the bindings defined
in [2]. The following are properties specific to those nodes:
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index f1ea2c69648d..c587a966413e 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -281,6 +281,19 @@ gestures can normally be extracted from it.
If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
device.
+INPUT_PROP_TOPBUTTONPAD:
+-----------------------
+Some laptops, most notably the Lenovo *40 series, provide a trackstick
+device but do not have physical buttons associated with the trackstick
+device. Instead, the top area of the touchpad is marked to show
+visual/haptic areas for the left, middle and right buttons intended to
+be used with the trackstick.
+
+If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
+accordingly. This property does not affect kernel behavior.
+The kernel does not provide button emulation for such devices but treats
+them as any other INPUT_PROP_BUTTONPAD device.
+
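
A short sketch of how userspace might detect the property via the
EVIOCGPROP ioctl; /dev/input/event0 is a placeholder node, and
INPUT_PROP_TOPBUTTONPAD requires input headers from a kernel that
defines it:

    #include <fcntl.h>
    #include <linux/input.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    int main(void)
    {
            unsigned long props[INPUT_PROP_MAX / BITS_PER_LONG + 1];
            int fd = open("/dev/input/event0", O_RDONLY);

            if (fd < 0)
                    return 1;
            memset(props, 0, sizeof(props));
            if (ioctl(fd, EVIOCGPROP(sizeof(props)), props) < 0)
                    return 1;
            if (props[INPUT_PROP_TOPBUTTONPAD / BITS_PER_LONG] &
                (1UL << (INPUT_PROP_TOPBUTTONPAD % BITS_PER_LONG)))
                    printf("top software button area present\n");
            return 0;
    }
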
Guidelines:
==========
The guidelines below ensure proper single-touch and multi-finger functionality.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c1b9aa8c5a52..b7fa2f599459 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2790,6 +2790,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
leaf rcu_node structure. Useful for very large
systems.
+ rcutree.jiffies_till_sched_qs= [KNL]
+ Set required age in jiffies for a
+ given grace period before RCU starts
+ soliciting quiescent-state help from
+ rcu_note_context_switch().
+
rcutree.jiffies_till_first_fqs= [KNL]
Set delay from grace-period initialization to
first attempt to force quiescent states.
@@ -3526,7 +3532,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the allocated input device; If set to 0, video driver
will only send out the event without touching backlight
brightness level.
- default: 0
+ default: 1
virtio_mmio.device=
[VMMIO] Memory mapped virtio (platform) device.
diff --git a/Documentation/laptops/00-INDEX b/Documentation/laptops/00-INDEX
index d13b9a9a9e00..d399ae1fc724 100644
--- a/Documentation/laptops/00-INDEX
+++ b/Documentation/laptops/00-INDEX
@@ -8,8 +8,8 @@ disk-shock-protection.txt
- information on hard disk shock protection.
dslm.c
- Simple Disk Sleep Monitor program
-hpfall.c
- - (HP) laptop accelerometer program for disk protection.
+freefall.c
+ - (HP/DELL) laptop accelerometer program for disk protection.
laptop-mode.txt
- how to conserve battery power using laptop-mode.
sony-laptop.txt
diff --git a/Documentation/laptops/hpfall.c b/Documentation/laptops/freefall.c
index b85dbbac0499..aab2ff09e868 100644
--- a/Documentation/laptops/hpfall.c
+++ b/Documentation/laptops/freefall.c
@@ -1,7 +1,9 @@
-/* Disk protection for HP machines.
+/* Disk protection for HP/DELL machines.
*
* Copyright 2008 Eric Piel
* Copyright 2009 Pavel Machek <[email protected]>
+ * Copyright 2012 Sonal Santan
+ * Copyright 2014 Pali Rohár <[email protected]>
*
* GPLv2.
*/
@@ -18,24 +20,31 @@
#include <signal.h>
#include <sys/mman.h>
#include <sched.h>
+#include <syslog.h>
-char unload_heads_path[64];
+static int noled;
+static char unload_heads_path[64];
+static char device_path[32];
+static const char app_name[] = "FREE FALL";
-int set_unload_heads_path(char *device)
+static int set_unload_heads_path(char *device)
{
char devname[64];
if (strlen(device) <= 5 || strncmp(device, "/dev/", 5) != 0)
return -EINVAL;
- strncpy(devname, device + 5, sizeof(devname));
+ strncpy(devname, device + 5, sizeof(devname) - 1);
+ strncpy(device_path, device, sizeof(device_path) - 1);
snprintf(unload_heads_path, sizeof(unload_heads_path) - 1,
"/sys/block/%s/device/unload_heads", devname);
return 0;
}
-int valid_disk(void)
+
+static int valid_disk(void)
{
int fd = open(unload_heads_path, O_RDONLY);
+
if (fd < 0) {
perror(unload_heads_path);
return 0;
@@ -45,43 +54,54 @@ int valid_disk(void)
return 1;
}
-void write_int(char *path, int i)
+static void write_int(char *path, int i)
{
char buf[1024];
int fd = open(path, O_RDWR);
+
if (fd < 0) {
perror("open");
exit(1);
}
+
sprintf(buf, "%d", i);
+
if (write(fd, buf, strlen(buf)) != strlen(buf)) {
perror("write");
exit(1);
}
+
close(fd);
}
-void set_led(int on)
+static void set_led(int on)
{
+ if (noled)
+ return;
write_int("/sys/class/leds/hp::hddprotect/brightness", on);
}
-void protect(int seconds)
+static void protect(int seconds)
{
+ const char *str = (seconds == 0) ? "Unparked" : "Parked";
+
write_int(unload_heads_path, seconds*1000);
+ syslog(LOG_INFO, "%s %s disk head\n", str, device_path);
}
-int on_ac(void)
+static int on_ac(void)
{
-// /sys/class/power_supply/AC0/online
+ /* /sys/class/power_supply/AC0/online */
+ return 1;
}
-int lid_open(void)
+static int lid_open(void)
{
-// /proc/acpi/button/lid/LID/state
+ /* /proc/acpi/button/lid/LID/state */
+ return 1;
}
-void ignore_me(void)
+static void ignore_me(int signum)
{
protect(0);
set_led(0);
@@ -90,6 +110,7 @@ void ignore_me(void)
int main(int argc, char **argv)
{
int fd, ret;
+ struct stat st;
struct sched_param param;
if (argc == 1)
@@ -111,7 +132,16 @@ int main(int argc, char **argv)
return EXIT_FAILURE;
}
- daemon(0, 0);
+ if (stat("/sys/class/leds/hp::hddprotect/brightness", &st))
+ noled = 1;
+
+ if (daemon(0, 0) != 0) {
+ perror("daemon");
+ return EXIT_FAILURE;
+ }
+
+ openlog(app_name, LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL1);
+
param.sched_priority = sched_get_priority_max(SCHED_FIFO);
sched_setscheduler(0, SCHED_FIFO, &param);
mlockall(MCL_CURRENT|MCL_FUTURE);
@@ -141,6 +171,7 @@ int main(int argc, char **argv)
alarm(20);
}
+ closelog();
close(fd);
return EXIT_SUCCESS;
}
diff --git a/MAINTAINERS b/MAINTAINERS
index 6813d0aa5ecf..c2066f4c3286 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -156,7 +156,6 @@ F: drivers/net/hamradio/6pack.c
8169 10/100/1000 GIGABIT ETHERNET DRIVER
M: Realtek linux nic maintainers <[email protected]>
-M: Francois Romieu <[email protected]>
S: Maintained
F: drivers/net/ethernet/realtek/r8169.c
@@ -1314,6 +1313,20 @@ W: http://oss.renesas.com
Q: http://patchwork.kernel.org/project/linux-sh/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
S: Supported
+F: arch/arm/boot/dts/emev2*
+F: arch/arm/boot/dts/r7s*
+F: arch/arm/boot/dts/r8a*
+F: arch/arm/boot/dts/sh*
+F: arch/arm/configs/ape6evm_defconfig
+F: arch/arm/configs/armadillo800eva_defconfig
+F: arch/arm/configs/bockw_defconfig
+F: arch/arm/configs/genmai_defconfig
+F: arch/arm/configs/koelsch_defconfig
+F: arch/arm/configs/kzm9g_defconfig
+F: arch/arm/configs/lager_defconfig
+F: arch/arm/configs/mackerel_defconfig
+F: arch/arm/configs/marzen_defconfig
+F: arch/arm/configs/shmobile_defconfig
F: arch/arm/mach-shmobile/
F: drivers/sh/
@@ -4497,8 +4510,7 @@ S: Supported
F: drivers/idle/i7300_idle.c
IEEE 802.15.4 SUBSYSTEM
-M: Alexander Smirnov <[email protected]>
-M: Dmitry Eremin-Solenikov <[email protected]>
+M: Alexander Aring <[email protected]>
L: [email protected] (moderated for non-subscribers)
W: http://apps.sourceforge.net/trac/linux-zigbee
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
@@ -6787,7 +6799,7 @@ F: arch/x86/kernel/quirks.c
PCI DRIVER FOR IMX6
M: Richard Zhu <[email protected]>
-M: Shawn Guo <[email protected]>
+M: Shawn Guo <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Maintained
@@ -6944,6 +6956,12 @@ L: [email protected] (moderated for non-subscribers)
S: Maintained
F: drivers/pinctrl/pinctrl-at91.c
+PIN CONTROLLER - RENESAS
+M: Laurent Pinchart <[email protected]>
+S: Maintained
+F: drivers/pinctrl/sh-pfc/
+
PIN CONTROLLER - SAMSUNG
M: Tomasz Figa <[email protected]>
M: Thomas Abraham <[email protected]>
@@ -7406,7 +7424,7 @@ S: Orphan
F: drivers/net/wireless/ray*
RCUTORTURE MODULE
-M: Josh Triplett <[email protected]>
+M: Josh Triplett <[email protected]>
M: "Paul E. McKenney" <[email protected]>
S: Supported
@@ -8007,6 +8025,16 @@ F: drivers/ata/
F: include/linux/ata.h
F: include/linux/libata.h
+SERIAL ATA AHCI PLATFORM devices support
+M: Hans de Goede <[email protected]>
+M: Tejun Heo <[email protected]>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Supported
+F: drivers/ata/ahci_platform.c
+F: drivers/ata/libahci_platform.c
+F: include/linux/ahci_platform.h
+
SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
M: Jayamohan Kallickal <[email protected]>
@@ -8894,7 +8922,7 @@ M: Stephen Warren <[email protected]>
M: Thierry Reding <[email protected]>
Q: http://patchwork.ozlabs.org/project/linux-tegra/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux.git
S: Supported
N: [^a-z]tegra
@@ -8984,7 +9012,7 @@ F: drivers/media/radio/radio-raremono.c
THERMAL
M: Zhang Rui <[email protected]>
-M: Eduardo Valentin <[email protected]>
+M: Eduardo Valentin <[email protected]>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
@@ -9011,7 +9039,7 @@ S: Maintained
F: drivers/platform/x86/thinkpad_acpi.c
TI BANDGAP AND THERMAL DRIVER
-M: Eduardo Valentin <[email protected]>
+M: Eduardo Valentin <[email protected]>
S: Supported
F: drivers/thermal/ti-soc-thermal/
diff --git a/Makefile b/Makefile
index 4d75b4bceedd..d0901b46b4bf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 16
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION =
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
@@ -41,6 +41,29 @@ unexport GREP_OPTIONS
# descending is started. They are now explicitly listed as the
# prepare rule.
+# Beautify output
+# ---------------------------------------------------------------------------
+#
+# Normally, we echo the whole command before executing it. By making
+# that echo $($(quiet)$(cmd)), we now have the possibility to set
+# $(quiet) to choose other forms of output instead, e.g.
+#
+# quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@
+# cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
+#
+# If $(quiet) is empty, the whole command will be printed.
+# If it is set to "quiet_", only the short version will be printed.
+# If it is set to "silent_", nothing will be printed at all, since
+# the variable $(silent_cmd_cc_o_c) doesn't exist.
+#
+# A simple variant is to prefix commands with $(Q) - that's useful
+# for commands that shall be hidden in non-verbose mode.
+#
+# $(Q)ln $@ :<
+#
+# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
+# If KBUILD_VERBOSE equals 1 then the above command is displayed.
+#
# To put more focus on warnings, be less verbose as default
# Use 'make V=1' to see the full commands
@@ -51,6 +74,29 @@ ifndef KBUILD_VERBOSE
KBUILD_VERBOSE = 0
endif
+ifeq ($(KBUILD_VERBOSE),1)
+ quiet =
+ Q =
+else
+ quiet=quiet_
+ Q = @
+endif
+
+# If the user is running make -s (silent mode), suppress echoing of
+# commands
+
+ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
+ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
+ quiet=silent_
+endif
+else # make-3.8x
+ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
+ quiet=silent_
+endif
+endif
+
+export quiet Q KBUILD_VERBOSE
+
# Call a source code checker (by default, "sparse") as part of the
# C compilation.
#
@@ -128,8 +174,11 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
# Fake the "Entering directory" message once, so that IDEs/editors are
# able to understand relative filenames.
+ echodir := @echo
+ quiet_echodir := @echo
+silent_echodir := @:
sub-make: FORCE
- @echo "make[1]: Entering directory \`$(KBUILD_OUTPUT)'"
+ $($(quiet)echodir) "make[1]: Entering directory \`$(KBUILD_OUTPUT)'"
$(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \
KBUILD_SRC=$(CURDIR) \
KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile \
@@ -292,52 +341,6 @@ endif
export KBUILD_MODULES KBUILD_BUILTIN
export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD
-# Beautify output
-# ---------------------------------------------------------------------------
-#
-# Normally, we echo the whole command before executing it. By making
-# that echo $($(quiet)$(cmd)), we now have the possibility to set
-# $(quiet) to choose other forms of output instead, e.g.
-#
-# quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@
-# cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
-#
-# If $(quiet) is empty, the whole command will be printed.
-# If it is set to "quiet_", only the short version will be printed.
-# If it is set to "silent_", nothing will be printed at all, since
-# the variable $(silent_cmd_cc_o_c) doesn't exist.
-#
-# A simple variant is to prefix commands with $(Q) - that's useful
-# for commands that shall be hidden in non-verbose mode.
-#
-# $(Q)ln $@ :<
-#
-# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
-# If KBUILD_VERBOSE equals 1 then the above command is displayed.
-
-ifeq ($(KBUILD_VERBOSE),1)
- quiet =
- Q =
-else
- quiet=quiet_
- Q = @
-endif
-
-# If the user is running make -s (silent mode), suppress echoing of
-# commands
-
-ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
-ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
- quiet=silent_
-endif
-else # make-3.8x
-ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
- quiet=silent_
-endif
-endif
-
-export quiet Q KBUILD_VERBOSE
-
ifneq ($(CC),)
ifeq ($(shell $(CC) -v 2>&1 | grep -c "clang version"), 1)
COMPILER := clang
@@ -685,6 +688,8 @@ KBUILD_CFLAGS += -fomit-frame-pointer
endif
endif
+KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
+
ifdef CONFIG_DEBUG_INFO
KBUILD_CFLAGS += -g
KBUILD_AFLAGS += -Wa,-gdwarf-2
@@ -1173,7 +1178,7 @@ distclean: mrproper
# Packaging of the kernel to various formats
# ---------------------------------------------------------------------------
# rpm target kept for backward compatibility
-package-dir := $(srctree)/scripts/package
+package-dir := scripts/package
%src-pkg: FORCE
$(Q)$(MAKE) $(build)=$(package-dir) $@
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 245058b3b0ef..290f02ee0157 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,6 +6,7 @@ config ARM
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_IPC_PARSE_VERSION
@@ -312,7 +313,7 @@ config ARCH_MULTIPLATFORM
config ARCH_INTEGRATOR
bool "ARM Ltd. Integrator family"
select ARM_AMBA
- select ARM_PATCH_PHYS_VIRT
+ select ARM_PATCH_PHYS_VIRT if MMU
select AUTO_ZRELADDR
select COMMON_CLK
select COMMON_CLK_VERSATILE
@@ -658,7 +659,7 @@ config ARCH_MSM
config ARCH_SHMOBILE_LEGACY
bool "Renesas ARM SoCs (non-multiplatform)"
select ARCH_SHMOBILE
- select ARM_PATCH_PHYS_VIRT
+ select ARM_PATCH_PHYS_VIRT if MMU
select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU if SMP
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index ecb267767cf5..e2156a583de7 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -529,8 +529,8 @@
serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
0 0 1 2
>;
- tx-num-evt = <1>;
- rx-num-evt = <1>;
+ tx-num-evt = <32>;
+ rx-num-evt = <32>;
};
&tps {
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index ab9a34ce524c..80a3b215e7d6 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -560,8 +560,8 @@
serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
0 0 1 2
>;
- tx-num-evt = <1>;
- rx-num-evt = <1>;
+ tx-num-evt = <32>;
+ rx-num-evt = <32>;
};
&tscadc {
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
index 8a0a72dc7dd7..a1a0cc5eb35c 100644
--- a/arch/arm/boot/dts/am335x-igep0033.dtsi
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
@@ -105,10 +105,16 @@
&cpsw_emac0 {
phy_id = <&davinci_mdio>, <0>;
+ phy-mode = "rmii";
};
&cpsw_emac1 {
phy_id = <&davinci_mdio>, <1>;
+ phy-mode = "rmii";
+};
+
+&phy_sel {
+ rmii-clock-ext;
};
&elm {
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 287795985e32..b84bac5bada4 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -925,7 +925,7 @@
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00500000 0x00100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
+ clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
<&uhpck>;
clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
status = "disabled";
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index d6133f497207..2c0d6ea3ab41 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -1045,6 +1045,8 @@
reg = <0x00500000 0x80000
0xf803c000 0x400>;
interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&usb>, <&udphs_clk>;
+ clock-names = "hclk", "pclk";
status = "disabled";
ep0 {
@@ -1122,6 +1124,7 @@
compatible = "atmel,at91sam9rl-pwm";
reg = <0xf8034000 0x300>;
interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>;
+ clocks = <&pwm_clk>;
#pwm-cells = <3>;
status = "disabled";
};
@@ -1153,8 +1156,7 @@
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00600000 0x100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
- <&uhpck>;
+ clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 4adc28039c30..83089540e324 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -240,6 +240,7 @@
regulator-name = "ldo3";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
regulator-boot-on;
};
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index c90c76de84d6..dc7a292fe939 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -673,10 +673,12 @@
l3_iclk_div: l3_iclk_div {
#clock-cells = <0>;
- compatible = "fixed-factor-clock";
+ compatible = "ti,divider-clock";
+ ti,max-div = <2>;
+ ti,bit-shift = <4>;
+ reg = <0x0100>;
clocks = <&dpll_core_h12x2_ck>;
- clock-mult = <1>;
- clock-div = <1>;
+ ti,index-power-of-two;
};
l4_root_clk_div: l4_root_clk_div {
@@ -684,7 +686,7 @@
compatible = "fixed-factor-clock";
clocks = <&l3_iclk_div>;
clock-mult = <1>;
- clock-div = <1>;
+ clock-div = <2>;
};
video1_clk2_div: video1_clk2_div {
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index fbaf426d2daa..d9cb972d9914 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -554,7 +554,7 @@
interrupts = <0 37 0>, <0 38 0>, <0 39 0>, <0 40 0>, <0 41 0>;
clocks = <&clock CLK_PWM>;
clock-names = "timers";
- #pwm-cells = <2>;
+ #pwm-cells = <3>;
status = "disabled";
};
@@ -608,6 +608,7 @@
clocks = <&clock CLK_SCLK_FIMD0>, <&clock CLK_FIMD0>;
clock-names = "sclk_fimd", "fimd";
samsung,power-domain = <&pd_lcd0>;
+ samsung,sysreg = <&sys_reg>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/exynos5.dtsi b/arch/arm/boot/dts/exynos5.dtsi
index 79d0608d6dcc..fdead12952a1 100644
--- a/arch/arm/boot/dts/exynos5.dtsi
+++ b/arch/arm/boot/dts/exynos5.dtsi
@@ -87,6 +87,7 @@
reg = <0x14400000 0x40000>;
interrupt-names = "fifo", "vsync", "lcd_sys";
interrupts = <18 4>, <18 5>, <18 6>;
+ samsung,sysreg = <&sysreg_system_controller>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index e38532271ef9..43004665a48c 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -167,7 +167,7 @@
compatible = "samsung,exynos5420-audss-clock";
reg = <0x03810000 0x0C>;
#clock-cells = <1>;
- clocks = <&clock CLK_FIN_PLL>, <&clock CLK_FOUT_EPLL>,
+ clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MAU_EPLL>,
<&clock CLK_SCLK_MAUDIO0>, <&clock CLK_SCLK_MAUPCM0>;
clock-names = "pll_ref", "pll_in", "sclk_audio", "sclk_pcm_in";
};
@@ -260,6 +260,9 @@
mfc_pd: power-domain@10044060 {
compatible = "samsung,exynos4210-pd";
reg = <0x10044060 0x20>;
+ clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>,
+ <&clock CLK_MOUT_USER_ACLK333>;
+ clock-names = "oscclk", "pclk0", "clk0";
};
disp_pd: power-domain@100440C0 {
@@ -517,6 +520,26 @@
phy-names = "dp";
};
+ mipi_phy: video-phy@10040714 {
+ compatible = "samsung,s5pv210-mipi-video-phy";
+ reg = <0x10040714 12>;
+ #phy-cells = <1>;
+ };
+
+ dsi@14500000 {
+ compatible = "samsung,exynos5410-mipi-dsi";
+ reg = <0x14500000 0x10000>;
+ interrupts = <0 82 0>;
+ samsung,power-domain = <&disp_pd>;
+ phys = <&mipi_phy 1>;
+ phy-names = "dsim";
+ clocks = <&clock CLK_DSIM1>, <&clock CLK_SCLK_MIPI1>;
+ clock-names = "bus_clk", "pll_clk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
fimd: fimd@14400000 {
samsung,power-domain = <&disp_pd>;
clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>;
diff --git a/arch/arm/boot/dts/hi3620.dtsi b/arch/arm/boot/dts/hi3620.dtsi
index ab1116d086be..83a5b8685bd9 100644
--- a/arch/arm/boot/dts/hi3620.dtsi
+++ b/arch/arm/boot/dts/hi3620.dtsi
@@ -73,7 +73,7 @@
L2: l2-cache {
compatible = "arm,pl310-cache";
- reg = <0xfc10000 0x100000>;
+ reg = <0x100000 0x100000>;
interrupts = <0 15 4>;
cache-unified;
cache-level = <2>;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 1fe45d1f75ec..b15f1a77d684 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -353,7 +353,7 @@
};
twl_power: power {
- compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
+ compatible = "ti,twl4030-power-n900";
ti,use_poweroff;
};
};
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 8d7ffaeff6e0..79f68acfd5d4 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -540,9 +540,9 @@
#clock-cells = <0>;
clock-output-names = "sd1";
};
- sd2_clk: sd3_clk@e615007c {
+ sd2_clk: sd3_clk@e615026c {
compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
- reg = <0 0xe615007c 0 4>;
+ reg = <0 0xe615026c 0 4>;
clocks = <&pll1_div2_clk>;
#clock-cells = <0>;
clock-output-names = "sd2";
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index f557feb997f4..90d8b6c7a205 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -4,7 +4,7 @@
*/
/dts-v1/;
-/include/ "ste-nomadik-stn8815.dtsi"
+#include "ste-nomadik-stn8815.dtsi"
/ {
model = "Calao Systems USB-S8815";
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index d316c955bd5f..dbcf521b017f 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -1,7 +1,9 @@
/*
* Device Tree for the ST-Ericsson Nomadik 8815 STn8815 SoC
*/
-/include/ "skeleton.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include "skeleton.dtsi"
/ {
#address-cells = <1>;
@@ -842,8 +844,7 @@
bus-width = <4>;
cap-mmc-highspeed;
cap-sd-highspeed;
- cd-gpios = <&gpio3 15 0x1>;
- cd-inverted;
+ cd-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
vmmc-supply = <&vmmc_regulator>;
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 4522366da759..15468fbbdea3 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -137,7 +137,7 @@ static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
dst += AES_BLOCK_SIZE;
} while (--blocks);
}
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
@@ -158,7 +158,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->dec, walk.iv);
kernel_neon_end();
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
while (walk.nbytes) {
u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -182,7 +182,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
dst += AES_BLOCK_SIZE;
src += AES_BLOCK_SIZE;
} while (--blocks);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
@@ -268,7 +268,7 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->enc, walk.iv);
kernel_neon_end();
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
@@ -292,7 +292,7 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->dec, walk.iv);
kernel_neon_end();
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 060a75e99263..0406cb3f1af7 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -50,6 +50,7 @@ struct machine_desc {
struct smp_operations *smp; /* SMP operations */
bool (*smp_init)(void);
void (*fixup)(struct tag *, char **);
+ void (*dt_fixup)(void);
void (*init_meminfo)(void);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index e94a157ddff1..11c54de9f8cf 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -212,7 +212,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
mdesc_best = &__mach_desc_GENERIC_DT;
#endif
- if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys)))
+ if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
return NULL;
mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
@@ -237,6 +237,12 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
dump_machine_table(); /* does not return */
}
+ /* We really don't want to do this, but sometimes firmware provides buggy data */
+ if (mdesc->dt_fixup)
+ mdesc->dt_fixup();
+
+ early_init_dt_scan_nodes();
+
/* Change machine number to match the mdesc we're using */
__machine_arch_type = mdesc->nr;
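
To illustrate the new hook, a hypothetical board file might look like the
sketch below; every name here is invented for the example, and the callback
body is where known-bad firmware data would be corrected before the nodes
are scanned:

    #include <linux/init.h>
    #include <asm/mach/arch.h>

    static const char *const foo_dt_compat[] = { "vendor,foo-soc", NULL };

    static void __init foo_dt_fixup(void)
    {
            /* patch up buggy firmware-provided FDT data here; this runs
             * after the machine is matched but before
             * early_init_dt_scan_nodes() parses the tree */
    }

    DT_MACHINE_START(FOO, "Vendor Foo board (DT)")
            .dt_compat      = foo_dt_compat,
            .dt_fixup       = foo_dt_fixup,
    MACHINE_END
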
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index a5599cfc43cb..2b32978ae905 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -94,13 +94,19 @@ ENTRY(iwmmxt_task_enable)
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
+ bl concan_save
- teq r1, #0 @ test for last ownership
- mov lr, r9 @ normal exit from exception
- beq concan_load @ no owner, skip save
+#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+#endif
+4: dec_preempt_count r10, r3
+ mov pc, r9 @ normal exit from exception
concan_save:
+ teq r1, #0 @ test for last ownership
+ beq concan_load @ no owner, skip save
+
tmrc r2, wCon
@ CUP? wCx
@@ -138,7 +144,7 @@ concan_dump:
wstrd wR15, [r1, #MMX_WR15]
2: teq r0, #0 @ anything to load?
- beq 3f
+ moveq pc, lr @ if not, return
concan_load:
@@ -171,14 +177,9 @@ concan_load:
@ clear CUP/MUP (only if r1 != 0)
teq r1, #0
mov r2, #0
- beq 3f
- tmcr wCon, r2
+ moveq pc, lr
-3:
-#ifdef CONFIG_PREEMPT_COUNT
- get_thread_info r10
-#endif
-4: dec_preempt_count r10, r3
+ tmcr wCon, r2
mov pc, lr
/*
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 778c2f7024ff..a74b53c1b7df 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -160,12 +160,16 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
static struct undef_hook kgdb_brkpt_hook = {
.instr_mask = 0xffffffff,
.instr_val = KGDB_BREAKINST,
+ .cpsr_mask = MODE_MASK,
+ .cpsr_val = SVC_MODE,
.fn = kgdb_brk_fn
};
static struct undef_hook kgdb_compiled_brkpt_hook = {
.instr_mask = 0xffffffff,
.instr_val = KGDB_COMPILED_BREAK,
+ .cpsr_mask = MODE_MASK,
+ .cpsr_val = SVC_MODE,
.fn = kgdb_compiled_brk_fn
};
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index 9db4b659d03e..cb1424240ff6 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -74,8 +74,6 @@ void kprobe_arm_test_cases(void)
TEST_RRR( op "lt" s " r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\
TEST_RR( op "gt" s " r12, r13" ", r",14,val, ", ror r",14,7,"")\
TEST_RR( op "le" s " r14, r",0, val, ", r13" ", lsl r",14,8,"")\
- TEST_RR( op s " r12, pc" ", r",14,val, ", ror r",14,7,"")\
- TEST_RR( op s " r14, r",0, val, ", pc" ", lsl r",14,8,"")\
TEST_R( op "eq" s " r0, r",11,VAL1,", #0xf5") \
TEST_R( op "ne" s " r11, r",0, VAL1,", #0xf5000000") \
TEST_R( op s " r7, r",8, VAL2,", #0x000af000") \
@@ -103,8 +101,6 @@ void kprobe_arm_test_cases(void)
TEST_RRR( op "ge r",11,VAL1,", r",14,N(val),", asr r",7, 6,"") \
TEST_RR( op "le r13" ", r",14,val, ", ror r",14,7,"") \
TEST_RR( op "gt r",0, val, ", r13" ", lsl r",14,8,"") \
- TEST_RR( op " pc" ", r",14,val, ", ror r",14,7,"") \
- TEST_RR( op " r",0, val, ", pc" ", lsl r",14,8,"") \
TEST_R( op "eq r",11,VAL1,", #0xf5") \
TEST_R( op "ne r",0, VAL1,", #0xf5000000") \
TEST_R( op " r",8, VAL2,", #0x000af000")
@@ -125,7 +121,6 @@ void kprobe_arm_test_cases(void)
TEST_RR( op "ge" s " r11, r",11,N(val),", asr r",7, 6,"") \
TEST_RR( op "lt" s " r12, r",11,val, ", ror r",14,7,"") \
TEST_R( op "gt" s " r14, r13" ", lsl r",14,8,"") \
- TEST_R( op "le" s " r14, pc" ", lsl r",14,8,"") \
TEST( op "eq" s " r0, #0xf5") \
TEST( op "ne" s " r11, #0xf5000000") \
TEST( op s " r7, #0x000af000") \
@@ -159,12 +154,19 @@ void kprobe_arm_test_cases(void)
TEST_SUPPORTED("cmp pc, #0x1000");
TEST_SUPPORTED("cmp sp, #0x1000");
- /* Data-processing with PC as shift*/
+ /* Data-processing with PC and a shift count in a register */
TEST_UNSUPPORTED(__inst_arm(0xe15c0f1e) " @ cmp r12, r14, asl pc")
TEST_UNSUPPORTED(__inst_arm(0xe1a0cf1e) " @ mov r12, r14, asl pc")
TEST_UNSUPPORTED(__inst_arm(0xe08caf1e) " @ add r10, r12, r14, asl pc")
-
- /* Data-processing with PC as shift*/
+ TEST_UNSUPPORTED(__inst_arm(0xe151021f) " @ cmp r1, pc, lsl r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe17f0211) " @ cmn pc, r1, lsl r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe1a0121f) " @ mov r1, pc, lsl r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe1a0f211) " @ mov pc, r1, lsl r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe042131f) " @ sub r1, r2, pc, lsl r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe1cf1312) " @ bic r1, pc, r2, lsl r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe081f312) " @ add pc, r1, r2, lsl r3")
+
+ /* Data-processing with PC as a target and status registers updated */
TEST_UNSUPPORTED("movs pc, r1")
TEST_UNSUPPORTED("movs pc, r1, lsl r2")
TEST_UNSUPPORTED("movs pc, #0x10000")
@@ -187,14 +189,14 @@ void kprobe_arm_test_cases(void)
TEST_BF_R ("add pc, pc, r",14,2f-1f-8,"")
TEST_BF_R ("add pc, r",14,2f-1f-8,", pc")
TEST_BF_R ("mov pc, r",0,2f,"")
- TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"")
+ TEST_BF_R ("add pc, pc, r",14,(2f-1f-8)*2,", asr #1")
TEST_BB( "sub pc, pc, #1b-2b+8")
#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
#endif
TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
- TEST_RR( "add pc, pc, r",10,-2,", asl r",11,1,"")
+ TEST_R( "add pc, pc, r",10,-2,", asl #1")
#ifdef CONFIG_THUMB2_KERNEL
TEST_ARM_TO_THUMB_INTERWORK_R("add pc, pc, r",0,3f-1f-8+1,"")
TEST_ARM_TO_THUMB_INTERWORK_R("sub pc, r",0,3f+8+1,", #8")
@@ -216,6 +218,7 @@ void kprobe_arm_test_cases(void)
TEST_BB_R("bx r",7,2f,"")
TEST_BF_R("bxeq r",14,2f,"")
+#if __LINUX_ARM_ARCH__ >= 5
TEST_R("clz r0, r",0, 0x0,"")
TEST_R("clzeq r7, r",14,0x1,"")
TEST_R("clz lr, r",7, 0xffffffff,"")
@@ -337,6 +340,7 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe16f02e1) " @ smultt pc, r1, r2")
TEST_UNSUPPORTED(__inst_arm(0xe16002ef) " @ smultt r0, pc, r2")
TEST_UNSUPPORTED(__inst_arm(0xe1600fe1) " @ smultt r0, r1, pc")
+#endif
TEST_GROUP("Multiply and multiply-accumulate")
@@ -559,6 +563,7 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED("ldrsht r1, [r2], #48")
#endif
+#if __LINUX_ARM_ARCH__ >= 5
TEST_RPR( "strd r",0, VAL1,", [r",1, 48,", -r",2,24,"]")
TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]")
TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
@@ -595,6 +600,7 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) " @ ldrd r12, [pc, #48]!")
TEST_UNSUPPORTED(__inst_arm(0xe0c9f3d0) " @ ldrd pc, [r9], #48")
TEST_UNSUPPORTED(__inst_arm(0xe0c9e3d0) " @ ldrd lr, [r9], #48")
+#endif
TEST_GROUP("Miscellaneous")
@@ -1227,7 +1233,9 @@ void kprobe_arm_test_cases(void)
TEST_COPROCESSOR( "mrc"two" 0, 0, r0, cr0, cr0, 0")
COPROCESSOR_INSTRUCTIONS_ST_LD("",e)
+#if __LINUX_ARM_ARCH__ >= 5
COPROCESSOR_INSTRUCTIONS_MC_MR("",e)
+#endif
TEST_UNSUPPORTED("svc 0")
TEST_UNSUPPORTED("svc 0xffffff")
@@ -1287,7 +1295,9 @@ void kprobe_arm_test_cases(void)
TEST( "blx __dummy_thumb_subroutine_odd")
#endif /* __LINUX_ARM_ARCH__ >= 6 */
+#if __LINUX_ARM_ARCH__ >= 5
COPROCESSOR_INSTRUCTIONS_ST_LD("2",f)
+#endif
#if __LINUX_ARM_ARCH__ >= 6
COPROCESSOR_INSTRUCTIONS_MC_MR("2",f)
#endif
diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c
index 379639998d5a..08d731294bcd 100644
--- a/arch/arm/kernel/kprobes-test.c
+++ b/arch/arm/kernel/kprobes-test.c
@@ -225,6 +225,7 @@ static int pre_handler_called;
static int post_handler_called;
static int jprobe_func_called;
static int kretprobe_handler_called;
+static int tests_failed;
#define FUNC_ARG1 0x12345678
#define FUNC_ARG2 0xabcdef
@@ -461,6 +462,13 @@ static int run_api_tests(long (*func)(long, long))
pr_info(" jprobe\n");
ret = test_jprobe(func);
+#if defined(CONFIG_THUMB2_KERNEL) && !defined(MODULE)
+ if (ret == -EINVAL) {
+ pr_err("FAIL: Known longtime bug with jprobe on Thumb kernels\n");
+ tests_failed = ret;
+ ret = 0;
+ }
+#endif
if (ret < 0)
return ret;
@@ -1672,6 +1680,8 @@ static int __init run_all_tests(void)
out:
if (ret == 0)
+ ret = tests_failed;
+ if (ret == 0)
pr_info("Finished kprobe tests OK\n");
else
pr_err("kprobe tests failed\n");
diff --git a/arch/arm/kernel/probes-arm.c b/arch/arm/kernel/probes-arm.c
index 51a13a027989..8eaef81d8344 100644
--- a/arch/arm/kernel/probes-arm.c
+++ b/arch/arm/kernel/probes-arm.c
@@ -341,12 +341,12 @@ static const union decode_item arm_cccc_000x_table[] = {
/* CMP (reg-shift reg) cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */
/* CMN (reg-shift reg) cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */
DECODE_EMULATEX (0x0f900090, 0x01100010, PROBES_DATA_PROCESSING_REG,
- REGS(ANY, 0, NOPC, 0, ANY)),
+ REGS(NOPC, 0, NOPC, 0, NOPC)),
/* MOV (reg-shift reg) cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */
/* MVN (reg-shift reg) cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */
DECODE_EMULATEX (0x0fa00090, 0x01a00010, PROBES_DATA_PROCESSING_REG,
- REGS(0, ANY, NOPC, 0, ANY)),
+ REGS(0, NOPC, NOPC, 0, NOPC)),
/* AND (reg-shift reg) cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */
/* EOR (reg-shift reg) cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */
@@ -359,7 +359,7 @@ static const union decode_item arm_cccc_000x_table[] = {
/* ORR (reg-shift reg) cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */
/* BIC (reg-shift reg) cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */
DECODE_EMULATEX (0x0e000090, 0x00000010, PROBES_DATA_PROCESSING_REG,
- REGS(ANY, ANY, NOPC, 0, ANY)),
+ REGS(NOPC, NOPC, NOPC, 0, NOPC)),
DECODE_END
};
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 9d853189028b..e35d880f9773 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -275,7 +275,7 @@ void store_cpu_topology(unsigned int cpuid)
cpu_topology[cpuid].socket_id, mpidr);
}
-static inline const int cpu_corepower_flags(void)
+static inline int cpu_corepower_flags(void)
{
return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
}
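
Background on the one-line topology.c change: a const qualifier on a scalar return type is meaningless (the caller gets a copy) and newer compilers warn about it via -Wignored-qualifiers. A trivial illustration, not kernel code:

#include <stdio.h>

/* Declaring this as 'const int flags(void)' would mean exactly the
 * same thing; the qualifier on the returned value is discarded. */
static int flags(void)
{
	return 0x1 | 0x2;	/* made-up flag bits */
}

int main(void)
{
	printf("flags=%#x\n", flags());
	return 0;
}
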
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index f38cf7c110cc..66c9b9614f3c 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -173,10 +173,8 @@ static struct platform_device exynos_cpuidle = {
void __init exynos_cpuidle_init(void)
{
- if (soc_is_exynos5440())
- return;
-
- platform_device_register(&exynos_cpuidle);
+ if (soc_is_exynos4210() || soc_is_exynos5250())
+ platform_device_register(&exynos_cpuidle);
}
void __init exynos_cpufreq_init(void)
@@ -297,7 +295,7 @@ static void __init exynos_dt_machine_init(void)
* This is called from smp_prepare_cpus if we've built for SMP, but
* we still need to set it up for PM and firmware ops if not.
*/
- if (!IS_ENABLED(SMP))
+ if (!IS_ENABLED(CONFIG_SMP))
exynos_sysram_init();
exynos_cpuidle_init();
@@ -337,6 +335,15 @@ static void __init exynos_reserve(void)
#endif
}
+static void __init exynos_dt_fixup(void)
+{
+ /*
+ * Some versions of U-Boot pass garbage entries in the memory node;
+ * cap the node at the old CONFIG_ARM_NR_BANKS limit of 8 banks.
+ */
+ of_fdt_limit_memory(8);
+}
+
DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
/* Maintainer: Thomas Abraham <[email protected]> */
/* Maintainer: Kukjin Kim <[email protected]> */
@@ -350,4 +357,5 @@ DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
.dt_compat = exynos_dt_compat,
.restart = exynos_restart,
.reserve = exynos_reserve,
+ .dt_fixup = exynos_dt_fixup,
MACHINE_END
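
The .dt_fixup hook registered above is invoked from the devtree setup path (first hunk of this patch) before the DT nodes are scanned. A condensed user-space sketch of the callback pattern; the struct is trimmed to the one relevant field and of_fdt_limit_memory() is replaced by a print:

#include <stdio.h>

struct machine_desc {
	const char *name;
	void (*dt_fixup)(void);		/* optional; may be NULL */
};

static void exynos_fixup(void)
{
	/* stands in for of_fdt_limit_memory(8) */
	puts("capping memory node at 8 banks");
}

static void setup_machine(const struct machine_desc *mdesc)
{
	if (mdesc->dt_fixup)		/* runs before the DT scan */
		mdesc->dt_fixup();
	puts("scanning device tree nodes");
}

int main(void)
{
	const struct machine_desc exynos = { "EXYNOS", exynos_fixup };

	setup_machine(&exynos);
	return 0;
}
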
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index eb91d2350f8c..e8797bb78871 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -57,8 +57,13 @@ static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
boot_reg = sysram_ns_base_addr + 0x1c;
- if (!soc_is_exynos4212() && !soc_is_exynos3250())
- boot_reg += 4*cpu;
+ /*
+ * Almost all Exynos SoCs that run in secure mode don't need an
+ * additional boot-register offset for every CPU; Exynos4412 is the
+ * only exception.
+ */
+ if (soc_is_exynos4412())
+ boot_reg += 4 * cpu;
__raw_writel(boot_addr, boot_reg);
return 0;
diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c
index 8a134d019cb3..920a4baa53cd 100644
--- a/arch/arm/mach-exynos/hotplug.c
+++ b/arch/arm/mach-exynos/hotplug.c
@@ -40,15 +40,17 @@ static inline void cpu_leave_lowpower(void)
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
+ u32 mpidr = cpu_logical_map(cpu);
+ u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+
for (;;) {
- /* make cpu1 to be turned off at next WFI command */
- if (cpu == 1)
- exynos_cpu_power_down(cpu);
+ /* Turn the CPU off on next WFI instruction. */
+ exynos_cpu_power_down(core_id);
wfi();
- if (pen_release == cpu_logical_map(cpu)) {
+ if (pen_release == core_id) {
/*
* OK, proper wakeup, we're done
*/
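
The same core_id derivation recurs in the platsmp.c hunk below: the power-control registers are indexed by affinity level 0 of the MPIDR, not by Linux's logical CPU number. A self-contained sketch of the extraction, mirroring the kernel's MPIDR_AFFINITY_LEVEL() macro (the example MPIDR value is made up):

#include <stdint.h>
#include <stdio.h>

#define MPIDR_LEVEL_BITS	8
#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> (MPIDR_LEVEL_BITS * (level))) & MPIDR_LEVEL_MASK)

int main(void)
{
	uint32_t mpidr = 0x80000101;	/* hypothetical: cluster 1, core 1 */

	printf("core_id = %u\n", MPIDR_AFFINITY_LEVEL(mpidr, 0));	/* 1 */
	printf("cluster = %u\n", MPIDR_AFFINITY_LEVEL(mpidr, 1));	/* 1 */
	return 0;
}
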
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 1c8d31e39520..50b9aad5e27b 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -90,7 +90,8 @@ static void exynos_secondary_init(unsigned int cpu)
static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
- unsigned long phys_cpu = cpu_logical_map(cpu);
+ u32 mpidr = cpu_logical_map(cpu);
+ u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
int ret = -ENOSYS;
/*
@@ -104,17 +105,18 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
* the holding pen - release it, then wait for it to flag
* that it has been released by resetting pen_release.
*
- * Note that "pen_release" is the hardware CPU ID, whereas
+ * Note that "pen_release" is the hardware CPU core ID, whereas
* "cpu" is Linux's internal ID.
*/
- write_pen_release(phys_cpu);
+ write_pen_release(core_id);
- if (!exynos_cpu_power_state(cpu)) {
- exynos_cpu_power_up(cpu);
+ if (!exynos_cpu_power_state(core_id)) {
+ exynos_cpu_power_up(core_id);
timeout = 10;
/* wait max 10 ms until cpu1 is on */
- while (exynos_cpu_power_state(cpu) != S5P_CORE_LOCAL_PWR_EN) {
+ while (exynos_cpu_power_state(core_id)
+ != S5P_CORE_LOCAL_PWR_EN) {
if (timeout-- == 0)
break;
@@ -145,20 +147,20 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Try to set boot address using firmware first
* and fall back to boot register if it fails.
*/
- ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+ ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
if (ret && ret != -ENOSYS)
goto fail;
if (ret == -ENOSYS) {
- void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+ void __iomem *boot_reg = cpu_boot_reg(core_id);
if (IS_ERR(boot_reg)) {
ret = PTR_ERR(boot_reg);
goto fail;
}
- __raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+ __raw_writel(boot_addr, cpu_boot_reg(core_id));
}
- call_firmware_op(cpu_boot, phys_cpu);
+ call_firmware_op(cpu_boot, core_id);
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
@@ -227,22 +229,24 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
* boot register if it fails.
*/
for (i = 1; i < max_cpus; ++i) {
- unsigned long phys_cpu;
unsigned long boot_addr;
+ u32 mpidr;
+ u32 core_id;
int ret;
- phys_cpu = cpu_logical_map(i);
+ mpidr = cpu_logical_map(i);
+ core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
boot_addr = virt_to_phys(exynos4_secondary_startup);
- ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+ ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
if (ret && ret != -ENOSYS)
break;
if (ret == -ENOSYS) {
- void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+ void __iomem *boot_reg = cpu_boot_reg(core_id);
if (IS_ERR(boot_reg))
break;
- __raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+ __raw_writel(boot_addr, cpu_boot_reg(core_id));
}
}
}
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index fe6570ebbdde..797cb134bfff 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_domain.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -24,6 +25,8 @@
#include "regs-pmu.h"
+#define MAX_CLK_PER_DOMAIN 4
+
/*
* Exynos specific wrapper around the generic power domain
*/
@@ -32,6 +35,9 @@ struct exynos_pm_domain {
char const *name;
bool is_off;
struct generic_pm_domain pd;
+ struct clk *oscclk;
+ struct clk *clk[MAX_CLK_PER_DOMAIN];
+ struct clk *pclk[MAX_CLK_PER_DOMAIN];
};
static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
@@ -44,6 +50,19 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
pd = container_of(domain, struct exynos_pm_domain, pd);
base = pd->base;
+ /* Reparent the domain clocks to oscclk before powering off a domain */
+ if (!power_on) {
+ int i;
+
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ if (IS_ERR(pd->clk[i]))
+ break;
+ if (clk_set_parent(pd->clk[i], pd->oscclk))
+ pr_err("%s: error setting oscclk as parent to clock %d\n",
+ pd->name, i);
+ }
+ }
+
pwr = power_on ? S5P_INT_LOCAL_PWR_EN : 0;
__raw_writel(pwr, base);
@@ -60,6 +79,20 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
cpu_relax();
usleep_range(80, 100);
}
+
+ /* Restore the original clock parents after powering on a domain */
+ if (power_on) {
+ int i;
+
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ if (IS_ERR(pd->clk[i]))
+ break;
+ if (clk_set_parent(pd->clk[i], pd->pclk[i]))
+ pr_err("%s: error setting parent to clock%d\n",
+ pd->name, i);
+ }
+ }
+
return 0;
}
@@ -152,9 +185,11 @@ static __init int exynos4_pm_init_power_domain(void)
for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
struct exynos_pm_domain *pd;
- int on;
+ int on, i;
+ struct device *dev;
pdev = of_find_device_by_node(np);
+ dev = &pdev->dev;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd) {
@@ -170,6 +205,30 @@ static __init int exynos4_pm_init_power_domain(void)
pd->pd.power_on = exynos_pd_power_on;
pd->pd.of_node = np;
+ pd->oscclk = clk_get(dev, "oscclk");
+ if (IS_ERR(pd->oscclk))
+ goto no_clk;
+
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ char clk_name[8];
+
+ snprintf(clk_name, sizeof(clk_name), "clk%d", i);
+ pd->clk[i] = clk_get(dev, clk_name);
+ if (IS_ERR(pd->clk[i]))
+ break;
+ snprintf(clk_name, sizeof(clk_name), "pclk%d", i);
+ pd->pclk[i] = clk_get(dev, clk_name);
+ if (IS_ERR(pd->pclk[i])) {
+ clk_put(pd->clk[i]);
+ pd->clk[i] = ERR_PTR(-EINVAL);
+ break;
+ }
+ }
+
+ if (IS_ERR(pd->clk[0]))
+ clk_put(pd->oscclk);
+
+no_clk:
platform_set_drvdata(pdev, pd);
on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
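
The shape of the pm_domains change, reduced to its essentials: every domain clock is parked on oscclk before power-off and handed back to its saved parent after power-on. A user-space sketch with stand-in clk handles (this is not the kernel clk API):

#include <stdio.h>

#define MAX_CLK_PER_DOMAIN 4

struct clk {
	const char *name;
	const struct clk *parent;
};

static void set_parent(struct clk *c, const struct clk *p)
{
	c->parent = p;
	printf("%s -> parent %s\n", c->name, p->name);
}

int main(void)
{
	struct clk oscclk = { "oscclk", NULL };
	struct clk pclk0 = { "pclk0", NULL };
	struct clk clk0 = { "clk0", &pclk0 };
	struct clk *clks[MAX_CLK_PER_DOMAIN] = { &clk0 };	/* rest unset */
	const struct clk *pclks[MAX_CLK_PER_DOMAIN] = { &pclk0 };
	int i;

	for (i = 0; i < MAX_CLK_PER_DOMAIN && clks[i]; i++)
		set_parent(clks[i], &oscclk);	/* before power-off */

	/* ... domain powered off and back on here ... */

	for (i = 0; i < MAX_CLK_PER_DOMAIN && clks[i]; i++)
		set_parent(clks[i], pclks[i]);	/* after power-on */
	return 0;
}
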
diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c
index 4ba587da89d2..84acdfd1d715 100644
--- a/arch/arm/mach-imx/clk-gate2.c
+++ b/arch/arm/mach-imx/clk-gate2.c
@@ -67,8 +67,12 @@ static void clk_gate2_disable(struct clk_hw *hw)
spin_lock_irqsave(gate->lock, flags);
- if (gate->share_count && --(*gate->share_count) > 0)
- goto out;
+ if (gate->share_count) {
+ if (WARN_ON(*gate->share_count == 0))
+ goto out;
+ else if (--(*gate->share_count) > 0)
+ goto out;
+ }
reg = readl(gate->reg);
reg &= ~(3 << gate->bit_idx);
@@ -78,19 +82,26 @@ out:
spin_unlock_irqrestore(gate->lock, flags);
}
-static int clk_gate2_is_enabled(struct clk_hw *hw)
+static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx)
{
- u32 reg;
- struct clk_gate2 *gate = to_clk_gate2(hw);
+ u32 val = readl(reg);
- reg = readl(gate->reg);
-
- if (((reg >> gate->bit_idx) & 1) == 1)
+ if (((val >> bit_idx) & 1) == 1)
return 1;
return 0;
}
+static int clk_gate2_is_enabled(struct clk_hw *hw)
+{
+ struct clk_gate2 *gate = to_clk_gate2(hw);
+
+ if (gate->share_count)
+ return !!(*gate->share_count);
+ else
+ return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx);
+}
+
static struct clk_ops clk_gate2_ops = {
.enable = clk_gate2_enable,
.disable = clk_gate2_disable,
@@ -116,6 +127,10 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
gate->bit_idx = bit_idx;
gate->flags = clk_gate2_flags;
gate->lock = lock;
+
+ /* Initialize share_count from the current hardware state */
+ if (share_count)
+ *share_count = clk_gate2_reg_is_enabled(reg, bit_idx) ? 1 : 0;
gate->share_count = share_count;
init.name = name;
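
The counting rule the clk-gate2 hunks enforce: the gate register is only touched on the 0->1 and 1->0 transitions of share_count, and is_enabled reports "on" while any sharer holds the gate. A user-space sketch (the enable path is not part of this hunk, so its shape here is an assumption based on the disable path):

#include <stdio.h>

static unsigned int share_count;
static unsigned int hw_enabled;		/* stands in for the gate register */

static void gate_enable(void)
{
	if (share_count++ == 0)		/* first sharer switches hardware on */
		hw_enabled = 1;
}

static void gate_disable(void)
{
	if (share_count == 0)		/* the WARN_ON() case in the patch */
		return;
	if (--share_count == 0)		/* last sharer switches hardware off */
		hw_enabled = 0;
}

int main(void)
{
	gate_enable();
	gate_enable();
	gate_disable();
	printf("enabled=%u count=%u\n", hw_enabled, share_count);	/* 1 1 */
	gate_disable();
	printf("enabled=%u count=%u\n", hw_enabled, share_count);	/* 0 0 */
	return 0;
}
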
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 8e795dea02ec..8556c787e59c 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -70,7 +70,7 @@ static const char *cko_sels[] = { "cko1", "cko2", };
static const char *lvds_sels[] = {
"dummy", "dummy", "dummy", "dummy", "dummy", "dummy",
"pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
- "pcie_ref", "sata_ref",
+ "pcie_ref_125m", "sata_ref_100m",
};
enum mx6q_clks {
@@ -491,7 +491,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
/* All existing boards with PCIe use LVDS1 */
if (IS_ENABLED(CONFIG_PCI_IMX6))
- clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
+ clk_set_parent(clk[lvds1_sel], clk[sata_ref_100m]);
/* Set initial power mode */
imx6q_set_lpm(WAIT_CLOCKED);
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 477202fd39cc..2bdc3233abe2 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -292,6 +292,10 @@ static struct notifier_block mvebu_hwcc_nb = {
.notifier_call = mvebu_hwcc_notifier,
};
+static struct notifier_block mvebu_hwcc_pci_nb = {
+ .notifier_call = mvebu_hwcc_notifier,
+};
+
static void __init armada_370_coherency_init(struct device_node *np)
{
struct resource res;
@@ -427,7 +431,7 @@ static int __init coherency_pci_init(void)
{
if (coherency_available())
bus_register_notifier(&pci_bus_type,
- &mvebu_hwcc_nb);
+ &mvebu_hwcc_pci_nb);
return 0;
}
diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S
index 5925366bc03c..da5bb292b91c 100644
--- a/arch/arm/mach-mvebu/headsmp-a9.S
+++ b/arch/arm/mach-mvebu/headsmp-a9.S
@@ -15,6 +15,8 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <asm/assembler.h>
+
__CPUINIT
#define CPU_RESUME_ADDR_REG 0xf10182d4
@@ -22,13 +24,18 @@
.global armada_375_smp_cpu1_enable_code_end
armada_375_smp_cpu1_enable_code_start:
- ldr r0, [pc, #4]
+ARM_BE8(setend be)
+ adr r0, 1f
+ ldr r0, [r0]
ldr r1, [r0]
+ARM_BE8(rev r1, r1)
mov pc, r1
+1:
.word CPU_RESUME_ADDR_REG
armada_375_smp_cpu1_enable_code_end:
ENTRY(mvebu_cortex_a9_secondary_startup)
+ARM_BE8(setend be)
bl v7_invalidate_l1
b secondary_startup
ENDPROC(mvebu_cortex_a9_secondary_startup)
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index a1d407c0febe..25aa8237d668 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -201,12 +201,12 @@ static noinline int do_armada_370_xp_cpu_suspend(unsigned long deepidle)
/* Test the CR_C bit and set it if it was cleared */
asm volatile(
- "mrc p15, 0, %0, c1, c0, 0 \n\t"
- "tst %0, #(1 << 2) \n\t"
- "orreq %0, %0, #(1 << 2) \n\t"
- "mcreq p15, 0, %0, c1, c0, 0 \n\t"
+ "mrc p15, 0, r0, c1, c0, 0 \n\t"
+ "tst r0, #(1 << 2) \n\t"
+ "orreq r0, r0, #(1 << 2) \n\t"
+ "mcreq p15, 0, r0, c1, c0, 0 \n\t"
"isb "
- : : "r" (0));
+ : : : "r0");
pr_warn("Failed to suspend the system\n");
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
index 332af927f4d3..67fd26a18441 100644
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ b/arch/arm/mach-omap2/clkt_dpll.c
@@ -76,7 +76,7 @@
* (assuming that it is counting N upwards), or -2 if the enclosing loop
* should skip to the next iteration (again assuming N is increasing).
*/
-static int _dpll_test_fint(struct clk_hw_omap *clk, u8 n)
+static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n)
{
struct dpll_data *dd;
long fint, fint_min, fint_max;
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index 04dab2fcf862..ee6c784cd6b7 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -26,11 +26,14 @@
#define OMAP3430_EN_WDT3_SHIFT 12
#define OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK (1 << 0)
#define OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT 0
+#define OMAP3430_IVA2_DPLL_FREQSEL_SHIFT 4
#define OMAP3430_IVA2_DPLL_FREQSEL_MASK (0xf << 4)
#define OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT 3
+#define OMAP3430_EN_IVA2_DPLL_SHIFT 0
#define OMAP3430_EN_IVA2_DPLL_MASK (0x7 << 0)
#define OMAP3430_ST_IVA2_SHIFT 0
#define OMAP3430_ST_IVA2_CLK_MASK (1 << 0)
+#define OMAP3430_AUTO_IVA2_DPLL_SHIFT 0
#define OMAP3430_AUTO_IVA2_DPLL_MASK (0x7 << 0)
#define OMAP3430_IVA2_CLK_SRC_SHIFT 19
#define OMAP3430_IVA2_CLK_SRC_WIDTH 3
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index b2d252bf4a53..dc571f1d3b8a 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -162,7 +162,8 @@ static inline void omap3xxx_restart(enum reboot_mode mode, const char *cmd)
}
#endif
-#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
void omap44xx_restart(enum reboot_mode mode, const char *cmd);
#else
static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd)
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 592ba0a0ecf3..b6f8f348296e 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -297,33 +297,6 @@ static void omap_init_audio(void)
static inline void omap_init_audio(void) {}
#endif
-#if defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI_MODULE)
-
-static struct platform_device omap_hdmi_audio = {
- .name = "omap-hdmi-audio",
- .id = -1,
-};
-
-static void __init omap_init_hdmi_audio(void)
-{
- struct omap_hwmod *oh;
- struct platform_device *pdev;
-
- oh = omap_hwmod_lookup("dss_hdmi");
- if (!oh)
- return;
-
- pdev = omap_device_build("omap-hdmi-audio-dai", -1, oh, NULL, 0);
- WARN(IS_ERR(pdev),
- "Can't build omap_device for omap-hdmi-audio-dai.\n");
-
- platform_device_register(&omap_hdmi_audio);
-}
-#else
-static inline void omap_init_hdmi_audio(void) {}
-#endif
-
#if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
#include <linux/platform_data/spi-omap2-mcspi.h>
@@ -459,7 +432,6 @@ static int __init omap2_init_devices(void)
*/
omap_init_audio();
omap_init_camera();
- omap_init_hdmi_audio();
omap_init_mbox();
/* If dtb is there, the devices will be created dynamically */
if (!of_have_populated_dt()) {
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
index b8208b4b1bd9..f7492df1cbba 100644
--- a/arch/arm/mach-omap2/dsp.c
+++ b/arch/arm/mach-omap2/dsp.c
@@ -29,6 +29,7 @@
#ifdef CONFIG_TIDSPBRIDGE_DVFS
#include "omap-pm.h"
#endif
+#include "soc.h"
#include <linux/platform_data/dsp-omap.h>
@@ -59,6 +60,9 @@ void __init omap_dsp_reserve_sdram_memblock(void)
phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
phys_addr_t paddr;
+ if (!cpu_is_omap34xx())
+ return;
+
if (!size)
return;
@@ -83,6 +87,9 @@ static int __init omap_dsp_init(void)
int err = -ENOMEM;
struct omap_dsp_platform_data *pdata = &omap_dsp_pdata;
+ if (!cpu_is_omap34xx())
+ return 0;
+
pdata->phys_mempool_base = omap_dsp_get_mempool_base();
if (pdata->phys_mempool_base) {
@@ -115,6 +122,9 @@ module_init(omap_dsp_init);
static void __exit omap_dsp_exit(void)
{
+ if (!cpu_is_omap34xx())
+ return;
+
platform_device_unregister(omap_dsp_pdev);
}
module_exit(omap_dsp_exit);
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 17cd39360afe..93914d220069 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -50,6 +50,16 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
soc_is_omap54xx() || soc_is_dra7xx())
return 1;
+ if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
+ ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
+ if (cpu_is_omap24xx())
+ return 0;
+ else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
+ return 0;
+ else
+ return 1;
+ }
+
/* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
* which require H/W based ECC error detection */
if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
@@ -57,14 +67,6 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
(ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
return 0;
- /*
- * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1
- * and AM33xx derivates. Other chips may be added if confirmed to work.
- */
- if ((ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW) &&
- (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0)))
- return 0;
-
/* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
if (ecc_opt == OMAP_ECC_HAM1_CODE_HW)
return 1;
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 2c0c2816900f..8bc13380f0a0 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1615,7 +1615,7 @@ static int gpmc_probe_dt(struct platform_device *pdev)
return ret;
}
- for_each_child_of_node(pdev->dev.of_node, child) {
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
if (!child->name)
continue;
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 539e8106eb96..a0fe747634c1 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -168,6 +168,10 @@ static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
break;
+ case L310_POWER_CTRL:
+ pr_info_once("OMAP L2C310: ROM does not support power control setting\n");
+ return;
+
default:
WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
return;
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 20b4398cec05..284324f2b98a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1268,9 +1268,6 @@ static struct omap_hwmod_class dra7xx_sata_hwmod_class = {
};
/* sata */
-static struct omap_hwmod_opt_clk sata_opt_clks[] = {
- { .role = "ref_clk", .clk = "sata_ref_clk" },
-};
static struct omap_hwmod dra7xx_sata_hwmod = {
.name = "sata",
@@ -1278,6 +1275,7 @@ static struct omap_hwmod dra7xx_sata_hwmod = {
.clkdm_name = "l3init_clkdm",
.flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
.main_clk = "func_48m_fclk",
+ .mpu_rt_idx = 1,
.prcm = {
.omap4 = {
.clkctrl_offs = DRA7XX_CM_L3INIT_SATA_CLKCTRL_OFFSET,
@@ -1285,8 +1283,6 @@ static struct omap_hwmod dra7xx_sata_hwmod = {
.modulemode = MODULEMODE_SWCTRL,
},
},
- .opt_clks = sata_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(sata_opt_clks),
};
/*
@@ -1731,8 +1727,20 @@ static struct omap_hwmod dra7xx_uart6_hwmod = {
*
*/
+static struct omap_hwmod_class_sysconfig dra7xx_usb_otg_ss_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = (SYSC_HAS_DMADISABLE | SYSC_HAS_MIDLEMODE |
+ SYSC_HAS_SIDLEMODE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+ MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
static struct omap_hwmod_class dra7xx_usb_otg_ss_hwmod_class = {
.name = "usb_otg_ss",
+ .sysc = &dra7xx_usb_otg_ss_sysc,
};
/* usb_otg_ss1 */
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index 106132db532b..cbefbd7cfdb5 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -35,6 +35,8 @@
#define OMAP3430_LOGICSTATEST_MASK (1 << 2)
#define OMAP3430_LASTLOGICSTATEENTERED_MASK (1 << 2)
#define OMAP3430_LASTPOWERSTATEENTERED_MASK (0x3 << 0)
+#define OMAP3430_GRPSEL_MCBSP5_MASK (1 << 10)
+#define OMAP3430_GRPSEL_MCBSP1_MASK (1 << 9)
#define OMAP3630_GRPSEL_UART4_MASK (1 << 18)
#define OMAP3430_GRPSEL_GPIO6_MASK (1 << 17)
#define OMAP3430_GRPSEL_GPIO5_MASK (1 << 16)
@@ -42,6 +44,10 @@
#define OMAP3430_GRPSEL_GPIO3_MASK (1 << 14)
#define OMAP3430_GRPSEL_GPIO2_MASK (1 << 13)
#define OMAP3430_GRPSEL_UART3_MASK (1 << 11)
+#define OMAP3430_GRPSEL_GPT8_MASK (1 << 9)
+#define OMAP3430_GRPSEL_GPT7_MASK (1 << 8)
+#define OMAP3430_GRPSEL_GPT6_MASK (1 << 7)
+#define OMAP3430_GRPSEL_GPT5_MASK (1 << 6)
#define OMAP3430_GRPSEL_MCBSP4_MASK (1 << 2)
#define OMAP3430_GRPSEL_MCBSP3_MASK (1 << 1)
#define OMAP3430_GRPSEL_MCBSP2_MASK (1 << 0)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 076172b69422..7c3fb41a462e 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -664,7 +664,7 @@ static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, v
static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
- unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
+ unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
if (rev >= L310_CACHE_ID_RTL_R2P0) {
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4c88935654ca..1f88db06b133 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -461,12 +461,21 @@ void __init dma_contiguous_remap(void)
map.type = MT_MEMORY_DMA_READY;
/*
- * Clear previous low-memory mapping
+ * Clear previous low-memory mapping to ensure that the
+ * TLB does not see any conflicting entries, then flush
+ * the TLB of the old entries before creating new mappings.
+ *
+ * This ensures that any speculatively loaded TLB entries
+ * (even though they may be rare) cannot cause any problems,
+ * and ensures that this code is architecturally compliant.
*/
for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
+ flush_tlb_kernel_range(__phys_to_virt(start),
+ __phys_to_virt(end));
+
iotable_init(&map, 1);
}
}
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 8e0e52eb76b5..c447ec70e868 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -9,6 +9,11 @@
#include <asm/sections.h>
#include <asm/system_info.h>
+/*
+ * Note: accesses outside of the kernel image and the identity map area
+ * are not supported on any CPU using the idmap tables as its current
+ * page tables.
+ */
pgd_t *idmap_pgd;
phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
@@ -25,6 +30,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
pr_warning("Failed to allocate identity pmd.\n");
return;
}
+ /*
+ * Copy the original PMD to ensure that the PMD entries for
+ * the kernel image are preserved.
+ */
+ if (!pud_none(*pud))
+ memcpy(pmd, pmd_offset(pud, 0),
+ PTRS_PER_PMD * sizeof(pmd_t));
pud_populate(&init_mm, pud, pmd);
pmd += pmd_index(addr);
} else
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ab14b79b03f0..6e3ba8d112a2 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1406,8 +1406,8 @@ void __init early_paging_init(const struct machine_desc *mdesc,
return;
/* remap kernel code and data */
- map_start = init_mm.start_code;
- map_end = init_mm.brk;
+ map_start = init_mm.start_code & PMD_MASK;
+ map_end = ALIGN(init_mm.brk, PMD_SIZE);
/* get a handle on things... */
pgd0 = pgd_offset_k(0);
@@ -1442,7 +1442,7 @@ void __init early_paging_init(const struct machine_desc *mdesc,
}
/* remap pmds for kernel mapping */
- phys = __pa(map_start) & PMD_MASK;
+ phys = __pa(map_start);
do {
*pmdk++ = __pmd(phys | pmdprot);
phys += PMD_SIZE;
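
The two mmu.c tweaks widen the remapped range to whole sections: map_start is rounded down to a PMD boundary, map_end rounded up, and the physical base then needs no extra masking. A quick arithmetic sketch with made-up addresses (2MiB sections as on LPAE):

#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE	(2UL << 20)	/* 2MiB */
#define PMD_MASK	(~(PMD_SIZE - 1))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uintptr_t start_code = 0xc0008180;	/* hypothetical */
	uintptr_t brk = 0xc0a1234c;		/* hypothetical */
	uintptr_t map_start = start_code & PMD_MASK;	/* 0xc0000000 */
	uintptr_t map_end = ALIGN(brk, PMD_SIZE);	/* 0xc0c00000 */

	printf("remap %#lx..%#lx\n",
	       (unsigned long)map_start, (unsigned long)map_end);
	return 0;
}
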
diff --git a/arch/arm/xen/grant-table.c b/arch/arm/xen/grant-table.c
index 859a9bb002d5..91cf08ba1e95 100644
--- a/arch/arm/xen/grant-table.c
+++ b/arch/arm/xen/grant-table.c
@@ -51,3 +51,8 @@ int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
{
return -ENOSYS;
}
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
+{
+ return 0;
+}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a474de346be6..839f48c26ef0 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -4,6 +4,7 @@ config ARM64
select ARCH_HAS_OPP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 60f2f4c12256..79cd911ef88c 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -106,7 +106,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, rounds, blocks, first);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
@@ -128,7 +128,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_dec, rounds, blocks, first);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
@@ -151,7 +151,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
first);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
@@ -174,7 +174,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
first);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
@@ -243,7 +243,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key1.key_enc, rounds, blocks,
(u8 *)ctx->key2.key_enc, walk.iv, first);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
@@ -267,7 +267,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key1.key_dec, rounds, blocks,
(u8 *)ctx->key2.key_enc, walk.iv, first);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
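
All six aes-glue hunks make the same correction: blkcipher_walk_done() takes the number of bytes left unprocessed, and passing 0 told the walker the whole chunk had been consumed even when a partial block remained. A trivial arithmetic sketch (the chunk size is made up):

#include <stdio.h>

#define AES_BLOCK_SIZE 16

int main(void)
{
	unsigned int nbytes = 100;			/* one walk chunk */
	unsigned int blocks = nbytes / AES_BLOCK_SIZE;	/* 6 full blocks */
	unsigned int left = nbytes % AES_BLOCK_SIZE;	/* 4 bytes remain */

	printf("processed %u blocks, report %u bytes left\n", blocks, left);
	return 0;
}
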
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 993bce527b85..902eb708804a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -56,6 +56,8 @@
#define TASK_SIZE_32 UL(0x100000000)
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
+#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE TASK_SIZE_64
#endif /* CONFIG_COMPAT */
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index 60e98a639ac5..e786e6cdc400 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -12,8 +12,6 @@
#include <linux/efi.h>
#include <linux/libfdt.h>
#include <asm/sections.h>
-#include <generated/compile.h>
-#include <generated/utsrelease.h>
/*
* AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 9aecbace4128..13bbc3be6f5a 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -27,8 +27,10 @@ void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
copy_page(kto, kfrom);
__flush_dcache_area(kto, PAGE_SIZE);
}
+EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
{
clear_page(kaddr);
}
+EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f43db8a69262..e90c5426fe14 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -60,6 +60,17 @@ static int __init early_initrd(char *p)
early_param("initrd", early_initrd);
#endif
+/*
+ * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+static phys_addr_t max_zone_dma_phys(void)
+{
+ phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+ return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+}
+
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
struct memblock_region *reg;
@@ -70,9 +81,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA)) {
- unsigned long max_dma_phys =
- (unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
- max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+ max_dma = PFN_DOWN(max_zone_dma_phys());
zone_size[ZONE_DMA] = max_dma - min;
}
zone_size[ZONE_NORMAL] = max - max_dma;
@@ -146,7 +155,7 @@ void __init arm64_memblock_init(void)
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA))
- dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1;
+ dma_phys_limit = max_zone_dma_phys();
dma_contiguous_reserve(dma_phys_limit);
memblock_allow_resize();
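
The arithmetic behind max_zone_dma_phys(), as a user-space sketch: keep the top 32 address bits of the DRAM start as an offset and allow a 4GiB window above it, clamped to the end of DRAM (the DRAM addresses below are made up):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t dram_start = 0x8080000000ULL;	/* hypothetical */
	uint64_t dram_end = 0x9000000000ULL;	/* hypothetical */
	uint64_t offset = dram_start & GENMASK_ULL(63, 32);
	uint64_t limit = offset + (1ULL << 32);

	if (limit > dram_end)
		limit = dram_end;
	printf("ZONE_DMA limit: %#llx\n", (unsigned long long)limit);
	return 0;
}
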
diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
index a7e9bfd84183..fcec5ce71392 100644
--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
@@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
-CONFIG_SPI_BFIN_V3=y
+CONFIG_SPI_ADI_V3=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index ba35864b2b74..c9eec84aa258 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -145,7 +145,7 @@ SECTIONS
.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
#else
- .init.data : AT(__data_lma + __data_len)
+ .init.data : AT(__data_lma + __data_len + 32)
{
__sinitdata = .;
INIT_DATA
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 63b0e4fe760c..0ccf0cf4daaf 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <linux/i2c.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index c65c6dbda3da..1e7290ef3525 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index af58454b4bff..c7495dc74690 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index a0211225748d..6b988ad653d8 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 90138e6112c1..1fe7ff286619 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"),
- PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", NULL, "ppi0_24b"),
+ PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"),
@@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"),
#endif
PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"),
- PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", NULL, "keys_4x4"),
+ PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+ PIN_MAP_MUX_GROUP("bf54x-keys", "4bit", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+ PIN_MAP_MUX_GROUP("bf54x-keys", "8bit", "pinctrl-adi2.0", "keys_8x8grp", "keys"),
};
static int __init ezkit_init(void)
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 430b16d5ccb1..6ab951534d79 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -44,6 +44,7 @@
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <linux/i2c-pca-platform.h>
#include <linux/delay.h>
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 9f777df4cacc..e862f7823e68 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -18,6 +18,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 88dee43e7abe..2de71e8c104b 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -14,6 +14,7 @@
#include <linux/spi/spi.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/gpio.h>
#include <linux/delay.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index 1ba4600de69f..e2c0b024ce88 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
{
#define CONFIG_SMC_GCTL_VAL 0x00000010
- if (!devm_pinctrl_get_select_default(&pdev->dev))
- return -EBUSY;
bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
bfin_write32(SMC_B0CTL, 0x01002011);
bfin_write32(SMC_B0TIM, 0x08170977);
@@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
void bf609_nor_flash_exit(struct platform_device *pdev)
{
- devm_pinctrl_put(pdev->dev.pins->p);
bfin_write32(SMC_GCTL, 0);
}
@@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"),
- PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", NULL, "ppi2_16b"),
- PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
- PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_8b"),
-#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
- PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#else
- PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_24b"),
-#endif
+ PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+ PIN_MAP_MUX_GROUP("bfin_display.0", "8bit", "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
+ PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+ PIN_MAP_MUX_GROUP("bfin_display.0", "16bit", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+ PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit", "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+ PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+ PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"),
diff --git a/arch/blackfin/mach-bf609/include/mach/pm.h b/arch/blackfin/mach-bf609/include/mach/pm.h
index 3ca0fb965636..a1efd936dd30 100644
--- a/arch/blackfin/mach-bf609/include/mach/pm.h
+++ b/arch/blackfin/mach-bf609/include/mach/pm.h
@@ -10,6 +10,7 @@
#define __MACH_BF609_PM_H__
#include <linux/suspend.h>
+#include <linux/platform_device.h>
extern int bfin609_pm_enter(suspend_state_t state);
extern int bf609_pm_prepare(void);
@@ -19,6 +20,6 @@ void bf609_hibernate(void);
void bfin_sec_raise_irq(unsigned int sid);
void coreb_enable(void);
-int bf609_nor_flash_init(void);
-void bf609_nor_flash_exit(void);
+int bf609_nor_flash_init(struct platform_device *pdev);
+void bf609_nor_flash_exit(struct platform_device *pdev);
#endif
diff --git a/arch/blackfin/mach-bf609/pm.c b/arch/blackfin/mach-bf609/pm.c
index 0cdd6955c7be..b1bfcf434d16 100644
--- a/arch/blackfin/mach-bf609/pm.c
+++ b/arch/blackfin/mach-bf609/pm.c
@@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static int smc_pm_syscore_suspend(void)
{
- bf609_nor_flash_exit();
+ bf609_nor_flash_exit(NULL);
return 0;
}
static void smc_pm_syscore_resume(void)
{
- bf609_nor_flash_init();
+ bf609_nor_flash_init(NULL);
}
static struct syscore_ops smc_pm_syscore_ops = {
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 867b7cef204c..1f94784eab6d 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1208,8 +1208,6 @@ int __init init_arch_irq(void)
bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
- bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
-
/* Enable interrupts IVG7-15 */
bfin_irq_flags |= IMASK_IVG15 |
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index dbb118e1a4e0..a54788458ca3 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -921,7 +921,8 @@ L(nocon):
jls 1f
lsrl #1,%d1
1:
- movel %d1,m68k_init_mapped_size
+ lea %pc@(m68k_init_mapped_size),%a0
+ movel %d1,%a0@
mmu_map #PAGE_OFFSET,%pc@(L(phys_kernel_start)),%d1,\
%pc@(m68k_supervisor_cachemode)
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 958f1adb9d0c..3857737e3958 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -11,6 +11,7 @@
*/
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -30,6 +31,7 @@
unsigned long (*mach_random_get_entropy)(void);
+EXPORT_SYMBOL_GPL(mach_random_get_entropy);
/*
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index a2fa297196bc..f5645d6a89f2 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -69,8 +69,6 @@
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
-#define SA_RESTORER 0x04000000 /* obsolete -- ignored */
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 608716f8496b..af3bc359dc70 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -1210,7 +1210,8 @@ static struct hp_hardware hp_hardware_list[] = {
{HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"},
{HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"},
{HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"},
- {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
+ {HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"},
+ {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"},
{HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"},
{HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"},
{HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index bb9f3b64de55..93c1963d76fe 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -4,6 +4,7 @@
* Copyright (C) 2000-2001 Hewlett Packard Company
* Copyright (C) 2000 John Marvin
* Copyright (C) 2001 Matthew Wilcox
+ * Copyright (C) 2014 Helge Deller <[email protected]>
*
* These routines maintain argument size conversion between 32bit and 64bit
* environment. Based heavily on sys_ia32.c and sys_sparc32.c.
@@ -11,44 +12,8 @@
#include <linux/compat.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/file.h>
-#include <linux/signal.h>
-#include <linux/resource.h>
-#include <linux/times.h>
-#include <linux/time.h>
-#include <linux/smp.h>
-#include <linux/sem.h>
-#include <linux/shm.h>
-#include <linux/slab.h>
-#include <linux/uio.h>
-#include <linux/ncp_fs.h>
-#include <linux/poll.h>
-#include <linux/personality.h>
-#include <linux/stat.h>
-#include <linux/highmem.h>
-#include <linux/highuid.h>
-#include <linux/mman.h>
-#include <linux/binfmts.h>
-#include <linux/namei.h>
-#include <linux/vfs.h>
-#include <linux/ptrace.h>
-#include <linux/swap.h>
#include <linux/syscalls.h>
-#include <asm/types.h>
-#include <asm/uaccess.h>
-#include <asm/mmu_context.h>
-
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(x) printk x
-#else
-#define DBG(x)
-#endif
asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
int r22, int r21, int r20)
@@ -57,3 +22,12 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
current->comm, current->pid, r20);
return -ENOSYS;
}
+
+asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
+ compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
+ const char __user * pathname)
+{
+ return sys_fanotify_mark(fanotify_fd, flags,
+ ((__u64)mask1 << 32) | mask0,
+ dfd, pathname);
+}
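
The compat wrapper above recombines a 64-bit mask that arrives split across two 32-bit registers. The merge itself, as a self-contained sketch (flag values made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask0 = 0x00000008;	/* low word */
	uint32_t mask1 = 0x00000001;	/* high word */
	uint64_t mask = ((uint64_t)mask1 << 32) | mask0;

	printf("mask=%#llx\n", (unsigned long long)mask);	/* 0x100000008 */
	return 0;
}
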
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index c5fa7a697fba..84c5d3a58fa1 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -418,7 +418,7 @@
ENTRY_SAME(accept4) /* 320 */
ENTRY_SAME(prlimit64)
ENTRY_SAME(fanotify_init)
- ENTRY_COMP(fanotify_mark)
+ ENTRY_DIFF(fanotify_mark)
ENTRY_COMP(clock_adjtime)
ENTRY_SAME(name_to_handle_at) /* 325 */
ENTRY_COMP(open_by_handle_at)
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index ae085ad0fba0..0bef864264c0 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -728,7 +728,6 @@ static void __init pagetable_init(void)
#endif
empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
- memset(empty_zero_page, 0, PAGE_SIZE);
}
static void __init gateway_init(void)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bd6dd6ed3a9f..80b94b0add1f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -145,6 +145,7 @@ config PPC
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select HAVE_ARCH_AUDITSYSCALL
+ select ARCH_SUPPORTS_ATOMIC_RMW
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -414,7 +415,7 @@ config KEXEC
config CRASH_DUMP
bool "Build a kdump crash kernel"
depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
- select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
+ select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x || FSL_BOOKE
help
Build a kernel suitable for use as a kdump capture kernel.
The same kernel binary can be used as production kernel and dump
@@ -1017,6 +1018,7 @@ endmenu
if PPC64
config RELOCATABLE
bool "Build a relocatable kernel"
+ depends on !COMPILE_TEST
select NONSTATIC_KERNEL
help
This builds a kernel image that is capable of running anywhere
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index bc2347774f0a..0fdd7eece6d9 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -447,6 +447,7 @@ extern const char *powerpc_base_platform;
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
+#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index fddb72b48ce9..d645428a65a4 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
return rb;
}
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+ bool is_base_size)
{
+
int size, a_psize;
/* Look at the 8 bit LP value */
unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
continue;
a_psize = __hpte_actual_psize(lp, size);
- if (a_psize != -1)
+ if (a_psize != -1) {
+ if (is_base_size)
+ return 1ul << mmu_psize_defs[size].shift;
return 1ul << mmu_psize_defs[a_psize].shift;
+ }
}
}
return 0;
}
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+ return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+ return __hpte_page_size(h, l, 1);
+}
+
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 807014dde821..c2b4dcf23d03 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -22,6 +22,7 @@
*/
#include <asm/pgtable-ppc64.h>
#include <asm/bug.h>
+#include <asm/processor.h>
/*
* Segment table
@@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);
*/
struct subpage_prot_table {
unsigned long maxaddr; /* only addresses < this are protected */
- unsigned int **protptrs[2];
+ unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
unsigned int *low_prot[4];
};
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index f8d1d6dcf7db..e61f24ed4e65 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -19,8 +19,7 @@
#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004)
#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
-#define MMU_FTR_TYPE_3E ASM_CONST(0x00000020)
-#define MMU_FTR_TYPE_47x ASM_CONST(0x00000040)
+#define MMU_FTR_TYPE_47x ASM_CONST(0x00000020)
/*
* This is individual features
@@ -106,13 +105,6 @@
MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
-#define MMU_FTRS_A2 MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | \
- MMU_FTR_USE_TLBIVAX_BCAST | \
- MMU_FTR_LOCK_BCAST_INVAL | \
- MMU_FTR_USE_TLBRSRV | \
- MMU_FTR_USE_PAIRED_MAS | \
- MMU_FTR_TLBIEL | \
- MMU_FTR_16M_PAGE
#ifndef __ASSEMBLY__
#include <asm/cputable.h>
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 9ed737146dbb..b3e936027b26 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -61,8 +61,7 @@ struct power_pmu {
#define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */
#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
-#define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */
-#define PPMU_EBB 0x00000100 /* supports event based branch */
+#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */
/*
* Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 9ea266eae33e..7e4612528546 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -277,6 +277,8 @@ n:
.globl n; \
n:
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
#define _KPROBE(n) \
.section ".kprobes.text","a"; \
.globl n; \
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 965291b4c2fa..0c157642c2a1 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
+ { /* Power8 DD1: Does not support doorbell IPIs */
+ .pvr_mask = 0xffffff00,
+ .pvr_value = 0x004d0100,
+ .cpu_name = "POWER8 (raw)",
+ .cpu_features = CPU_FTRS_POWER8_DD1,
+ .cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
+ .mmu_features = MMU_FTRS_POWER8,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power8",
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .cpu_setup = __setup_cpu_power8,
+ .cpu_restore = __restore_cpu_power8,
+ .flush_tlb = __flush_tlb_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
+ .platform = "power8",
+ },
{ /* Power8 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004d0000,
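
The new DD1 entry works because cpu_specs[] is matched in order with (pvr & pvr_mask) == pvr_value, so the more specific revision match must precede the generic POWER8 entry. A small sketch of that table scan, with just these two entries as hypothetical data:

#include <stdint.h>
#include <stdio.h>

struct cpu_spec { uint32_t pvr_mask, pvr_value; const char *name; };

/* More specific masks first: the DD1 entry matches the full
 * 0xffffff00 revision field, the generic entry only the top
 * 16 bits, and the table is scanned front to back. */
static const struct cpu_spec specs[] = {
	{ 0xffffff00, 0x004d0100, "POWER8 DD1 (no doorbell IPIs)" },
	{ 0xffff0000, 0x004d0000, "POWER8" },
};

static const char *identify(uint32_t pvr)
{
	for (unsigned i = 0; i < sizeof(specs) / sizeof(specs[0]); i++)
		if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
			return specs[i].name;
	return "unknown";
}

int main(void)
{
	printf("%s\n%s\n", identify(0x004d0100), identify(0x004d0200));
	return 0;
}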
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 2480256272d4..5cf3d367190d 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -131,7 +131,7 @@ _GLOBAL(power7_nap)
_GLOBAL(power7_sleep)
li r3,1
- li r4,0
+ li r4,1
b power7_powersave_common
/* No return */
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 658e89d2025b..db2b482af658 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -611,17 +611,19 @@ static void rtas_flash_firmware(int reboot_type)
for (f = flist; f; f = next) {
/* Translate data addrs to absolute */
for (i = 0; i < f->num_blocks; i++) {
- f->blocks[i].data = (char *)__pa(f->blocks[i].data);
+ f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
image_size += f->blocks[i].length;
+ f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
}
next = f->next;
/* Don't translate NULL pointer for last entry */
if (f->next)
- f->next = (struct flash_block_list *)__pa(f->next);
+ f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
else
f->next = NULL;
/* make num_blocks into the version/length field */
f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
+ f->num_blocks = cpu_to_be64(f->num_blocks);
}
printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
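
The rtas_flash hunk converts every field the firmware will read to big-endian right before the call, since the kernel itself may now run little-endian. A userspace sketch of the same in-place conversion, using a hypothetical two-field block descriptor:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical block-list entry handed to big-endian firmware. */
struct flash_block {
	uint64_t data;		/* physical address of the block */
	uint64_t length;	/* block length in bytes */
};

/* On a little-endian kernel every field the firmware reads must be
 * byte-swapped in place, exactly once, just before the call. */
static void to_firmware_endian(struct flash_block *b)
{
	b->data   = htobe64(b->data);
	b->length = htobe64(b->length);
}

int main(void)
{
	struct flash_block b = { 0x1000, 4096 };

	to_firmware_endian(&b);
	printf("data=%016llx length=%016llx\n",
	       (unsigned long long)b.data, (unsigned long long)b.length);
	return 0;
}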
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 51a3ff78838a..1007fb802e6b 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -747,7 +747,7 @@ int setup_profiling_timer(unsigned int multiplier)
#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
-static const int powerpc_smt_flags(void)
+static int powerpc_smt_flags(void)
{
int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 80561074078d..68468d695f12 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
goto out;
}
if (!rma_setup && is_vrma_hpte(v)) {
- unsigned long psize = hpte_page_size(v, r);
+ unsigned long psize = hpte_base_page_size(v, r);
unsigned long senc = slb_pgsize_encoding(psize);
unsigned long lpcr;
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 8c86422a1e37..731be7478b27 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -127,11 +127,6 @@ BEGIN_FTR_SECTION
stw r10, HSTATE_PMC + 24(r13)
stw r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-BEGIN_FTR_SECTION
- mfspr r9, SPRN_SIER
- std r8, HSTATE_MMCR + 40(r13)
- std r9, HSTATE_MMCR + 48(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
31:
/*
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6e6224318c36..5a24d3c2b6b8 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
r = hpte[i+1];
/*
- * Check the HPTE again, including large page size
- * Since we don't currently allow any MPSS (mixed
- * page-size segment) page sizes, it is sufficient
- * to check against the actual page size.
+ * Check the HPTE again, including base page size
*/
if ((v & valid) && (v & mask) == val &&
- hpte_page_size(v, r) == (1ul << pshift))
+ hpte_base_page_size(v, r) == (1ul << pshift))
/* Return with the HPTE still locked */
return (hash << 3) + (i >> 1);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 868347ef09fd..558a67df8126 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -48,7 +48,7 @@
*
* LR = return address to continue at after eventually re-enabling MMU
*/
-_GLOBAL(kvmppc_hv_entry_trampoline)
+_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
mflr r0
std r0, PPC_LR_STKOFF(r1)
stdu r1, -112(r1)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index e2c29e381dc7..d044b8b7c69d 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,7 +25,11 @@
#include <asm/exception-64s.h>
#if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name) name
+#else
#define FUNC(name) GLUE(.,name)
+#endif
#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU
#elif defined(CONFIG_PPC_BOOK3S_32)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 9eec675220e6..16c4d88ba27d 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,7 +36,11 @@
#if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name) name
+#else
#define FUNC(name) GLUE(.,name)
+#endif
#elif defined(CONFIG_PPC_BOOK3S_32)
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
* On entry, r4 contains the guest shadow MSR
* MSR.EE has to be 0 when calling this function
*/
-_GLOBAL(kvmppc_entry_trampoline)
+_GLOBAL_TOC(kvmppc_entry_trampoline)
mfmsr r5
LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
toreal(r7)
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index edb14ba992b3..ef27fbd5d9c5 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
u32 irq, server, priority;
int rc;
- if (args->nargs != 3 || args->nret != 1) {
+ if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
rc = -3;
goto out;
}
- irq = args->args[0];
- server = args->args[1];
- priority = args->args[2];
+ irq = be32_to_cpu(args->args[0]);
+ server = be32_to_cpu(args->args[1]);
+ priority = be32_to_cpu(args->args[2]);
rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
if (rc)
rc = -3;
out:
- args->rets[0] = rc;
+ args->rets[0] = cpu_to_be32(rc);
}
static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
u32 irq, server, priority;
int rc;
- if (args->nargs != 1 || args->nret != 3) {
+ if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
rc = -3;
goto out;
}
- irq = args->args[0];
+ irq = be32_to_cpu(args->args[0]);
server = priority = 0;
rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
goto out;
}
- args->rets[1] = server;
- args->rets[2] = priority;
+ args->rets[1] = cpu_to_be32(server);
+ args->rets[2] = cpu_to_be32(priority);
out:
- args->rets[0] = rc;
+ args->rets[0] = cpu_to_be32(rc);
}
static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
u32 irq;
int rc;
- if (args->nargs != 1 || args->nret != 1) {
+ if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
rc = -3;
goto out;
}
- irq = args->args[0];
+ irq = be32_to_cpu(args->args[0]);
rc = kvmppc_xics_int_off(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
- args->rets[0] = rc;
+ args->rets[0] = cpu_to_be32(rc);
}
static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
u32 irq;
int rc;
- if (args->nargs != 1 || args->nret != 1) {
+ if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
rc = -3;
goto out;
}
- irq = args->args[0];
+ irq = be32_to_cpu(args->args[0]);
rc = kvmppc_xics_int_on(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
- args->rets[0] = rc;
+ args->rets[0] = cpu_to_be32(rc);
}
#endif /* CONFIG_KVM_XICS */
@@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
return rc;
}
-static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
- int i;
-
- args->token = be32_to_cpu(args->token);
- args->nargs = be32_to_cpu(args->nargs);
- args->nret = be32_to_cpu(args->nret);
- for (i = 0; i < args->nargs; i++)
- args->args[i] = be32_to_cpu(args->args[i]);
-#endif
-}
-
-static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
- int i;
-
- for (i = 0; i < args->nret; i++)
- args->args[i] = cpu_to_be32(args->args[i]);
- args->token = cpu_to_be32(args->token);
- args->nargs = cpu_to_be32(args->nargs);
- args->nret = cpu_to_be32(args->nret);
-#endif
-}
-
int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
{
struct rtas_token_definition *d;
@@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
if (rc)
goto fail;
- kvmppc_rtas_swap_endian_in(&args);
-
/*
* args->rets is a pointer into args->args. Now that we've
* copied args we need to fix it up to point into our copy,
@@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
* value so we can restore it on the way out.
*/
orig_rets = args.rets;
- args.rets = &args.args[args.nargs];
+ args.rets = &args.args[be32_to_cpu(args.nargs)];
mutex_lock(&vcpu->kvm->lock);
rc = -ENOENT;
list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
- if (d->token == args.token) {
+ if (d->token == be32_to_cpu(args.token)) {
d->handler->handler(vcpu, &args);
rc = 0;
break;
@@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
if (rc == 0) {
args.rets = orig_rets;
- kvmppc_rtas_swap_endian_out(&args);
rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
if (rc)
goto fail;
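
Rather than byte-swapping the whole argument block on entry and exit, the reworked handlers above leave the guest's buffer big-endian in memory and convert each field at its point of use. A compact sketch of that per-field style, with a hypothetical set_xive-like handler:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* The guest's RTAS argument block stays big-endian in memory;
 * each field is converted where it is used instead of the whole
 * struct being swapped up front. */
struct rtas_args {
	uint32_t token, nargs, nret, args[4];
};

static int handle_set_xive(struct rtas_args *a)
{
	if (be32toh(a->nargs) != 3 || be32toh(a->nret) != 1)
		return -3;
	return (int)be32toh(a->args[0]);	/* e.g. the irq argument */
}

int main(void)
{
	struct rtas_args a = {
		htobe32(42), htobe32(3), htobe32(1),
		{ htobe32(7), htobe32(0), htobe32(5) }
	};

	printf("irq = %d\n", handle_set_xive(&a));
	return 0;
}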
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index dd2cc03f406f..86903d3f5a03 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
if (printk_ratelimit())
pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
__func__, (long)gfn, pfn);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
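
The e500 fix replaces a bare return with ret/goto so the error path reaches the function's common cleanup. A minimal sketch of the pattern, with hypothetical acquire()/release() helpers standing in for the page reference taken earlier in kvmppc_e500_shadow_map():

#include <stdio.h>
#include <stdlib.h>

static int *acquire(void) { return malloc(sizeof(int)); }
static void release(int *r) { free(r); }

static int shadow_map(int pte_present)
{
	int ret = 0;
	int *ref = acquire();

	if (!ref)
		return -1;

	if (!pte_present) {
		ret = -22;	/* -EINVAL: fall through to cleanup, */
		goto out;	/* never a bare return past this point */
	}
	/* ... normal mapping work ... */
out:
	release(ref);
	return ret;
}

int main(void)
{
	printf("%d %d\n", shadow_map(1), shadow_map(0));
	return 0;
}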
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 0738f96befbf..43435c6892fb 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -77,7 +77,7 @@ _GLOBAL(memset)
stb r4,0(r6)
blr
-_GLOBAL(memmove)
+_GLOBAL_TOC(memmove)
cmplw 0,r3,r4
bgt backwards_memcpy
b memcpy
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 412dd46dd0b7..5c09f365c842 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = regs->gpr[rb] & 0x3f;
ival = (signed int) regs->gpr[rd];
regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
- if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
+ if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = rb;
ival = (signed int) regs->gpr[rd];
regs->gpr[ra] = ival >> sh;
- if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+ if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#ifdef __powerpc64__
case 27: /* sld */
- sh = regs->gpr[rd] & 0x7f;
+ sh = regs->gpr[rb] & 0x7f;
if (sh < 64)
regs->gpr[ra] = regs->gpr[rd] << sh;
else
@@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = regs->gpr[rb] & 0x7f;
ival = (signed long int) regs->gpr[rd];
regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
- if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
+ if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = rb | ((instr & 2) << 4);
ival = (signed long int) regs->gpr[rd];
regs->gpr[ra] = ival >> sh;
- if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+ if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
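
All of the sstep.c hunks are the same fix: the carry mask was built from a plain int `1`, so `(1 << sh) - 1` is undefined (and at best truncated) once sh reaches 32. A short demonstration of the difference; the demo emulates the typical 32-bit wraparound with an explicit `& 31` so that it stays well-defined itself:

#include <stdio.h>

int main(void)
{
	unsigned long sh = 40;

	/* (1 << sh) shifts an int: undefined for sh >= 32, and the
	 * mask is only 32 bits wide even where it happens to "work". */
	unsigned long bad  = (1u  << (sh & 31)) - 1; /* wrapped shift count */
	unsigned long good = (1ul << sh) - 1;        /* full 64-bit mask */

	printf("bad=%016lx good=%016lx\n", bad, good);
	return 0;
}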
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index af3d78e19302..928ebe79668b 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -410,17 +410,7 @@ void __init mmu_context_init(void)
} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
first_context = 1;
last_context = 65535;
- } else
-#ifdef CONFIG_PPC_BOOK3E_MMU
- if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
- u32 mmucfg = mfspr(SPRN_MMUCFG);
- u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
- >> MMUCFG_PIDSIZE_SHIFT;
- first_context = 1;
- last_context = (1UL << (pid_bits + 1)) - 1;
- } else
-#endif
- {
+ } else {
first_context = 1;
last_context = 255;
}
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 6dcdadefd8d0..82e82cadcde5 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -390,12 +390,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
case BPF_ANC | SKF_AD_VLAN_TAG:
case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+ BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+
PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
vlan_tci));
- if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
- PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
- else
+ if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
+ PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
+ } else {
PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
+ PPC_SRWI(r_A, r_A, 12);
+ }
break;
case BPF_ANC | SKF_AD_QUEUE:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
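
For reference, what the two JIT paths above now compute from vlan_tci: SKF_AD_VLAN_TAG clears only the present bit (the old VLAN_VID_MASK also dropped the PCP/DEI bits), and SKF_AD_VLAN_TAG_PRESENT isolates bit 12 and shifts it down to 0/1, matching the added PPC_SRWI. A plain C sketch:

#include <stdint.h>
#include <stdio.h>

#define VLAN_TAG_PRESENT 0x1000	/* bit 12 of vlan_tci */

int main(void)
{
	uint16_t tci = 0x1064;	/* present bit set, VID 0x064 */

	/* SKF_AD_VLAN_TAG: everything but the present bit. */
	uint16_t tag = tci & (uint16_t)~VLAN_TAG_PRESENT;

	/* SKF_AD_VLAN_TAG_PRESENT: bit 12 shifted down to 0/1. */
	uint16_t present = (tci & VLAN_TAG_PRESENT) >> 12;

	printf("tag=0x%03x present=%u\n", (unsigned)tag, (unsigned)present);
	return 0;
}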
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 4520c9356b54..fe52db2eea6a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -485,7 +485,7 @@ static bool is_ebb_event(struct perf_event *event)
* check that the PMU supports EBB, meaning those that don't can still
* use bit 63 of the event code for something else if they wish.
*/
- return (ppmu->flags & PPMU_EBB) &&
+ return (ppmu->flags & PPMU_ARCH_207S) &&
((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
}
@@ -777,7 +777,7 @@ void perf_event_print_debug(void)
if (ppmu->flags & PPMU_HAS_SIER)
sier = mfspr(SPRN_SIER);
- if (ppmu->flags & PPMU_EBB) {
+ if (ppmu->flags & PPMU_ARCH_207S) {
pr_info("MMCR2: %016lx EBBHR: %016lx\n",
mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
pr_info("EBBRR: %016lx BESCR: %016lx\n",
@@ -996,7 +996,22 @@ static void power_pmu_read(struct perf_event *event)
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
local64_add(delta, &event->count);
- local64_sub(delta, &event->hw.period_left);
+
+ /*
+ * A number of places program the PMC with (0x80000000 - period_left).
+ * We never want period_left to be less than 1 because we will program
+ * the PMC with a value >= 0x80000000 and an edge-detected PMC will
+ * roll around to 0 before taking an exception. We have seen this
+ * on POWER8.
+ *
+ * To fix this, clamp the minimum value of period_left to 1.
+ */
+ do {
+ prev = local64_read(&event->hw.period_left);
+ val = prev - delta;
+ if (val < 1)
+ val = 1;
+ } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
}
/*
@@ -1292,6 +1307,9 @@ static void power_pmu_enable(struct pmu *pmu)
out_enable:
pmao_restore_workaround(ebb);
+ if (ppmu->flags & PPMU_ARCH_207S)
+ mtspr(SPRN_MMCR2, 0);
+
mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
mb();
@@ -1696,7 +1714,7 @@ static int power_pmu_event_init(struct perf_event *event)
if (has_branch_stack(event)) {
/* PMU has BHRB enabled */
- if (!(ppmu->flags & PPMU_BHRB))
+ if (!(ppmu->flags & PPMU_ARCH_207S))
return -EOPNOTSUPP;
}
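
The period_left clamp added to power_pmu_read() is a lock-free read-modify-write: retry until the compare-and-swap observes an unchanged previous value. A userspace sketch of the same loop using C11 atomics in place of local64_cmpxchg():

#include <stdatomic.h>
#include <stdio.h>

/* "Subtract, but never go below 1", mirroring the period_left
 * update in the patch. */
static void sub_clamped(atomic_long *v, long delta)
{
	long prev, next;

	do {
		prev = atomic_load(v);
		next = prev - delta;
		if (next < 1)
			next = 1;	/* never program the PMC past the edge */
	} while (!atomic_compare_exchange_weak(v, &prev, next));
}

int main(void)
{
	atomic_long left = 5;

	sub_clamped(&left, 100);
	printf("period_left=%ld\n", atomic_load(&left));
	return 0;
}

The weak compare-exchange may fail spuriously, which is harmless here because the loop simply recomputes from the freshly observed value.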
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index fe2763b6e039..639cd9156585 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -792,7 +792,7 @@ static struct power_pmu power8_pmu = {
.get_constraint = power8_get_constraint,
.get_alternatives = power8_get_alternatives,
.disable_pmc = power8_disable_pmc,
- .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
+ .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(power8_generic_events),
.generic_events = power8_generic_events,
.cache_events = &power8_cache_events,
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index 38e0a1a5cec3..5e6e0bad6db6 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -111,6 +111,7 @@ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
return ret;
}
+#ifdef CONFIG_COREDUMP
int elf_coredump_extra_notes_size(void)
{
struct spufs_calls *calls;
@@ -142,6 +143,7 @@ int elf_coredump_extra_notes_write(struct coredump_params *cprm)
return ret;
}
+#endif
void notify_spus_active(void)
{
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index b9d5d678aa44..52a7d2596d30 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,8 +1,9 @@
obj-$(CONFIG_SPU_FS) += spufs.o
-spufs-y += inode.o file.o context.o syscalls.o coredump.o
+spufs-y += inode.o file.o context.o syscalls.o
spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
spufs-y += switch.o fault.o lscsa_alloc.o
+spufs-$(CONFIG_COREDUMP) += coredump.o
# magic for the trace events
CFLAGS_sched.o := -I$(src)
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index b045fdda4845..a87200a535fa 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -79,8 +79,10 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
struct spufs_calls spufs_calls = {
.create_thread = do_spu_create,
.spu_run = do_spu_run,
- .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
- .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
.notify_spus_active = do_notify_spus_active,
.owner = THIS_MODULE,
+#ifdef CONFIG_COREDUMP
+ .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
+ .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
+#endif
};
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 10268c41d830..0ad533b617f7 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -249,7 +249,7 @@ static void elog_work_fn(struct work_struct *work)
rc = opal_get_elog_size(&id, &size, &type);
if (rc != OPAL_SUCCESS) {
- pr_err("ELOG: Opal log read failed\n");
+ pr_err("ELOG: OPAL log info read failed\n");
return;
}
@@ -257,7 +257,7 @@ static void elog_work_fn(struct work_struct *work)
log_id = be64_to_cpu(id);
elog_type = be64_to_cpu(type);
- BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
+ WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
elog_size = OPAL_MAX_ERRLOG_SIZE;
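
Minor pattern note: the elog path now warns and truncates instead of crashing on an oversized firmware reply. A trivial sketch of that clamp-and-warn shape:

#include <stdio.h>

#define OPAL_MAX_ERRLOG_SIZE 16384

/* Oversized firmware replies are logged and truncated rather than
 * taking the machine down with a BUG_ON. */
static size_t sanitize_elog_size(size_t sz)
{
	if (sz > OPAL_MAX_ERRLOG_SIZE) {
		fprintf(stderr, "elog: oversized log (%zu), truncating\n", sz);
		sz = OPAL_MAX_ERRLOG_SIZE;
	}
	return sz;
}

int main(void)
{
	printf("%zu\n", sanitize_elog_size(99999));
	return 0;
}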
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 022b38e6a80b..2d0b4d68a40a 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
}
of_node_set_flag(dn, OF_DYNAMIC);
+ of_node_init(dn);
return dn;
}
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 0435bb65d0aa..1c0a60d98867 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
np->properties = proplist;
of_node_set_flag(np, OF_DYNAMIC);
+ of_node_init(np);
np->parent = derive_parent(path);
if (IS_ERR(np->parent)) {
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index df38c70cd59e..18ea9e3f8142 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc)
return 0;
asm volatile(
- "0: lfpc %1\n"
- " la %0,0\n"
+ " lfpc %1\n"
+ "0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 7ba7d6784510..e88d35d74950 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -437,11 +437,11 @@ ENTRY(startup_kdump)
#if defined(CONFIG_64BIT)
#if defined(CONFIG_MARCH_ZEC12)
- .long 3, 0xc100efea, 0xf46ce800, 0x00400000
+ .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_Z196)
- .long 2, 0xc100efea, 0xf46c0000
+ .long 2, 0xc100eff2, 0xf46c0000
#elif defined(CONFIG_MARCH_Z10)
- .long 2, 0xc100efea, 0xf0680000
+ .long 2, 0xc100eff2, 0xf0680000
#elif defined(CONFIG_MARCH_Z9_109)
.long 1, 0xc100efc2
#elif defined(CONFIG_MARCH_Z990)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 2d716734b5b1..5dc7ad9e2fbf 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
unsigned long mask = PSW_MASK_USER;
mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
- if ((data & ~mask) != PSW_USER_BITS)
+ if ((data ^ PSW_USER_BITS) & ~mask)
+ /* Invalid psw mask. */
+ return -EINVAL;
+ if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
+ /* Invalid address-space-control bits */
return -EINVAL;
if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
+ /* Invalid addressing mode bits */
return -EINVAL;
}
*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
@@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child,
mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
/* Build a 64 bit psw mask from 31 bit mask. */
- if ((tmp & ~mask) != PSW32_USER_BITS)
+ if ((tmp ^ PSW32_USER_BITS) & ~mask)
/* Invalid psw mask. */
return -EINVAL;
+ if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
+ /* Invalid address-space-control bits */
+ return -EINVAL;
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(regs->psw.mask & PSW_MASK_BA) |
(__u64)(tmp & mask) << 32;
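
The reworked PSW check `(data ^ PSW_USER_BITS) & ~mask` compares only the bits outside the user-modifiable mask, whereas the old `(data & ~mask) != PSW_USER_BITS` also demanded that the fixed pattern contain no in-mask bits. A sketch with hypothetical constants showing a legal value that only the new test accepts:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values: required fixed bits, and the mask of
	 * bits the user is allowed to change. Note the fixed pattern
	 * deliberately has one bit inside the mask. */
	uint64_t psw_user_bits = 0x0705000180000000ull;
	uint64_t mask          = 0x00000000ffffffffull;

	uint64_t data = psw_user_bits | 0x1234;	/* differs only inside mask */

	/* Old check: compares the masked word against the full pattern,
	 * so it rejects this legal value. */
	int old_ok = (data & ~mask) == psw_user_bits;

	/* New check: only bits outside mask must match the pattern. */
	int new_ok = ((data ^ psw_user_bits) & ~mask) == 0;

	printf("old_ok=%d new_ok=%d\n", old_ok, new_ok);
	return 0;
}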
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 9ddc51eeb8d6..30de42730b2f 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -48,13 +48,10 @@
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
-static void zpci_enable_irq(struct irq_data *data);
-static void zpci_disable_irq(struct irq_data *data);
-
static struct irq_chip zpci_irq_chip = {
.name = "zPCI",
- .irq_unmask = zpci_enable_irq,
- .irq_mask = zpci_disable_irq,
+ .irq_unmask = unmask_msi_irq,
+ .irq_mask = mask_msi_irq,
};
static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
return rc;
}
-static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
-{
- int offset, pos;
- u32 mask_bits;
-
- if (msi->msi_attrib.is_msix) {
- offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL;
- msi->masked = readl(msi->mask_base + offset);
- writel(flag, msi->mask_base + offset);
- } else if (msi->msi_attrib.maskbit) {
- pos = (long) msi->mask_base;
- pci_read_config_dword(msi->dev, pos, &mask_bits);
- mask_bits &= ~(mask);
- mask_bits |= flag & mask;
- pci_write_config_dword(msi->dev, pos, mask_bits);
- } else
- return 0;
-
- msi->msi_attrib.maskbit = !!flag;
- return 1;
-}
-
-static void zpci_enable_irq(struct irq_data *data)
-{
- struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
- zpci_msi_set_mask_bits(msi, 1, 0);
-}
-
-static void zpci_disable_irq(struct irq_data *data)
-{
- struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
- zpci_msi_set_mask_bits(msi, 1, 1);
-}
-
void pcibios_fixup_bus(struct pci_bus *bus)
{
}
@@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
/* Release MSI interrupts */
list_for_each_entry(msi, &pdev->msi_list, list) {
- zpci_msi_set_mask_bits(msi, 1, 1);
+ if (msi->msi_attrib.is_msix)
+ default_msix_mask_irq(msi, 1);
+ else
+ default_msi_mask_irq(msi, 1, 1);
irq_set_msi_desc(msi->irq, NULL);
irq_free_desc(msi->irq);
msi->msg.address_lo = 0;
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index d4d16e4be07c..bf5b3f5f4962 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -32,7 +32,8 @@ endif
cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \
- $(call cc-option,-m2a-nofpu,)
+ $(call cc-option,-m2a-nofpu,) \
+ $(call cc-option,-m4-nofpu,)
cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,)
cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \
$(call cc-option,-mno-implicit-fp,-m4-nofpu)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 29f2e988c56a..407c87d9879a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -78,6 +78,7 @@ config SPARC64
select HAVE_C_RECORDMCOUNT
select NO_BOOTMEM
select HAVE_ARCH_AUDITSYSCALL
+ select ARCH_SUPPORTS_ATOMIC_RMW
config ARCH_DEFCONFIG
string
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index b73274fb961a..42f2bca1d338 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -410,8 +410,9 @@
#define __NR_finit_module 342
#define __NR_sched_setattr 343
#define __NR_sched_getattr 344
+#define __NR_renameat2 345
-#define NR_syscalls 345
+#define NR_syscalls 346
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index d066eb18650c..f834224208ed 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)
SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
+SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
.globl sys32_mmap2
sys32_mmap2:
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 151ace8766cc..85fe9b1087cd 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -86,3 +86,4 @@ sys_call_table:
/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+/*345*/ .long sys_renameat2
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 4bd4e2bb26cf..33ecba2826ea 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -87,6 +87,7 @@ sys_call_table32:
/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+ .word sys32_renameat2
#endif /* CONFIG_COMPAT */
@@ -165,3 +166,4 @@ sys_call_table:
/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+ .word sys_renameat2
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 9472079471bb..f1b3eb14b855 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -12,6 +12,7 @@
#include <mem_user.h>
#include <os.h>
#include <skas.h>
+#include <kern_util.h>
struct host_vm_change {
struct host_vm_op {
@@ -124,6 +125,9 @@ static int add_munmap(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
+ if ((addr >= STUB_START) && (addr < STUB_END))
+ return -EINVAL;
+
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MUNMAP) &&
@@ -283,8 +287,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
/* This is not an else because ret is modified above */
if (ret) {
printk(KERN_ERR "fix_range_common: failed, killing current "
- "process\n");
+ "process: %d\n", task_tgid_vnr(current));
+ /* We are under mmap_sem, release it such that current can terminate */
+ up_write(&current->mm->mmap_sem);
force_sig(SIGKILL, current);
+ do_signal();
}
}
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 974b87474a99..5678c3571e7c 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -206,7 +206,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
int is_write = FAULT_WRITE(fi);
unsigned long address = FAULT_ADDRESS(fi);
- if (regs)
+ if (!is_user && regs)
current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
if (!is_user && (address >= start_vm) && (address < end_vm)) {
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index d531879a4617..908579f2b0ab 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid)
void wait_stub_done(int pid)
{
- int n, status, err, bad_stop = 0;
+ int n, status, err;
while (1) {
CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@@ -74,8 +74,6 @@ void wait_stub_done(int pid)
if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
return;
- else
- bad_stop = 1;
bad_wait:
err = ptrace_dump_regs(pid);
@@ -85,10 +83,7 @@ bad_wait:
printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
"pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
status);
- if (bad_stop)
- kill(pid, SIGKILL);
- else
- fatal_sigsegv();
+ fatal_sigsegv();
}
extern unsigned long current_stub_stack(void);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a8f749ef0fdc..d24887b645dc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -131,6 +131,7 @@ config X86
select HAVE_CC_STACKPROTECTOR
select GENERIC_CPU_AUTOPROBE
select HAVE_ARCH_AUDITSYSCALL
+ select ARCH_SUPPORTS_ATOMIC_RMW
config INSTRUCTION_DECODER
def_bool y
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 84c223479e3c..7a6d43a554d7 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -91,10 +91,9 @@ bs_die:
.section ".bsdata", "a"
bugger_off_msg:
- .ascii "Direct floppy boot is not supported. "
- .ascii "Use a boot loader program instead.\r\n"
+ .ascii "Use a boot loader.\r\n"
.ascii "\n"
- .ascii "Remove disk and press any key to reboot ...\r\n"
+ .ascii "Remove disk and press any key to reboot...\r\n"
.byte 0
#ifdef CONFIG_EFI_STUB
@@ -108,7 +107,7 @@ coff_header:
#else
.word 0x8664 # x86-64
#endif
- .word 3 # nr_sections
+ .word 4 # nr_sections
.long 0 # TimeDateStamp
.long 0 # PointerToSymbolTable
.long 1 # NumberOfSymbols
@@ -250,6 +249,25 @@ section_table:
.word 0 # NumberOfLineNumbers
.long 0x60500020 # Characteristics (section flags)
+ #
+ # The offset & size fields are filled in by build.c.
+ #
+ .ascii ".bss"
+ .byte 0
+ .byte 0
+ .byte 0
+ .byte 0
+ .long 0
+ .long 0x0
+ .long 0 # Size of initialized data
+ # on disk
+ .long 0x0
+ .long 0 # PointerToRelocations
+ .long 0 # PointerToLineNumbers
+ .word 0 # NumberOfRelocations
+ .word 0 # NumberOfLineNumbers
+ .long 0xc8000080 # Characteristics (section flags)
+
#endif /* CONFIG_EFI_STUB */
# Kernel attributes; used by setup. This is part 1 of the
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 1a2f2121cada..a7661c430cd9 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -143,7 +143,7 @@ static void usage(void)
#ifdef CONFIG_EFI_STUB
-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
{
unsigned int pe_header;
unsigned short num_sections;
@@ -164,10 +164,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
put_unaligned_le32(size, section + 0x8);
/* section header vma field */
- put_unaligned_le32(offset, section + 0xc);
+ put_unaligned_le32(vma, section + 0xc);
/* section header 'size of initialised data' field */
- put_unaligned_le32(size, section + 0x10);
+ put_unaligned_le32(datasz, section + 0x10);
/* section header 'file offset' field */
put_unaligned_le32(offset, section + 0x14);
@@ -179,6 +179,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
}
}
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+ update_pecoff_section_header_fields(section_name, offset, size, size, offset);
+}
+
static void update_pecoff_setup_and_reloc(unsigned int size)
{
u32 setup_offset = 0x200;
@@ -203,9 +208,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
pe_header = get_unaligned_le32(&buf[0x3c]);
- /* Size of image */
- put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
/*
* Size of code: Subtract the size of the first sector (512 bytes)
* which includes the header.
@@ -220,6 +222,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
update_pecoff_section_header(".text", text_start, text_sz);
}
+static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
+{
+ unsigned int pe_header;
+ unsigned int bss_sz = init_sz - file_sz;
+
+ pe_header = get_unaligned_le32(&buf[0x3c]);
+
+ /* Size of uninitialized data */
+ put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
+
+ /* Size of image */
+ put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
+
+ update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
+}
+
static int reserve_pecoff_reloc_section(int c)
{
/* Reserve 0x20 bytes for .reloc section */
@@ -259,6 +277,8 @@ static void efi_stub_entry_update(void)
static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
static inline void update_pecoff_text(unsigned int text_start,
unsigned int file_sz) {}
+static inline void update_pecoff_bss(unsigned int file_sz,
+ unsigned int init_sz) {}
static inline void efi_stub_defaults(void) {}
static inline void efi_stub_entry_update(void) {}
@@ -310,7 +330,7 @@ static void parse_zoffset(char *fname)
int main(int argc, char ** argv)
{
- unsigned int i, sz, setup_sectors;
+ unsigned int i, sz, setup_sectors, init_sz;
int c;
u32 sys_size;
struct stat sb;
@@ -376,7 +396,9 @@ int main(int argc, char ** argv)
buf[0x1f1] = setup_sectors-1;
put_unaligned_le32(sys_size, &buf[0x1f4]);
- update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+ update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
+ init_sz = get_unaligned_le32(&buf[0x260]);
+ update_pecoff_bss(i + (sys_size * 16), init_sz);
efi_stub_entry_update();
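
update_pecoff_bss() patches two optional-header fields: SizeOfUninitializedData at pe_header+0x24 and SizeOfImage at pe_header+0x50 (standard PE32/PE32+ layout). A minimal sketch of writing those little-endian fields into a header buffer:

#include <stdint.h>
#include <stdio.h>

/* Store a 32-bit little-endian value at an arbitrary offset,
 * like the boot tool's put_unaligned_le32(). */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

int main(void)
{
	uint8_t buf[0x100] = { 0 };
	uint32_t pe_header = 0;		/* pretend the PE header is at 0 */
	uint32_t file_sz = 0x400000, init_sz = 0x600000;
	uint32_t bss_sz = init_sz - file_sz;

	/* Offsets used by the patch (COFF optional header):
	 * +0x24 SizeOfUninitializedData, +0x50 SizeOfImage. */
	put_le32(buf + pe_header + 0x24, bss_sz);
	put_le32(buf + pe_header + 0x50, init_sz);

	printf("bss=%#x image=%#x\n", bss_sz, init_sz);
	return 0;
}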
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index f30cd10293f0..8626b03e83b7 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out)
/* save number of bits */
bits[1] = cpu_to_be64(sctx->count[0] << 3);
- bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61;
+ bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
/* Pad out to 112 mod 128 and append length */
index = sctx->count[0] & 0x7f;
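
The sha512 fix moves the OR inside the byte swap: the three bits shifted out of count[0] must be merged into the high word of the 128-bit bit count before cpu_to_be64(), not OR'd into the already-swapped result. A quick check using htobe64:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 128-bit byte count split across two 64-bit words, as in
	 * struct sha512_state: count[0] low, count[1] high. */
	uint64_t count[2] = { 0x3000000000000000ull, 0x1 };

	/* Bits = bytes << 3; the bits shifted out of count[0] must
	 * land in the high word *before* the byte swap. */
	uint64_t hi = htobe64(count[1] << 3 | count[0] >> 61);
	uint64_t lo = htobe64(count[0] << 3);

	printf("hi=%016llx lo=%016llx\n",
	       (unsigned long long)hi, (unsigned long long)lo);
	return 0;
}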
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index bba3cf88e624..0a8b519226b8 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -129,7 +129,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */
-#define INTERRUPT_RETURN iretq
+#define INTERRUPT_RETURN jmp native_iret
#define USERGS_SYSRET64 \
swapgs; \
sysretq;
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f3a1f04ed4cb..584874451414 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -841,7 +841,6 @@ static int apm_do_idle(void)
u32 eax;
u8 ret = 0;
int idled = 0;
- int polling;
int err = 0;
if (!need_resched()) {
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index a80029035bf2..f9e4fdd3b877 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
*/
detect_extended_topology(c);
+ if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+ /*
+ * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+ * detection.
+ */
+ c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+ detect_ht(c);
+#endif
+ }
+
l2 = init_intel_cacheinfo(c);
if (c->cpuid_level > 9) {
unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_P3);
#endif
- if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
- /*
- * let's use the legacy cpuid vector 0x1 and 0x4 for topology
- * detection.
- */
- c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
- detect_ht(c);
-#endif
- }
-
/* Work around errata */
srat_detect_node(c);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a952e9c85b6f..9c8f7394c612 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
#endif
}
+#ifdef CONFIG_X86_HT
+ /*
+ * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
+ * turn means that the only possibility is SMT (as indicated in
+ * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+ * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+ * c->phys_proc_id.
+ */
+ if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+ per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
return l2;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bb92f38153b2..9a79c8dbd8e8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
for_each_online_cpu(i) {
err = mce_device_create(i);
if (err) {
+ /*
+ * Register notifier anyway (and do not unreg it) so
+ * that we don't leave undeleted timers, see notifier
+ * callback above.
+ */
+ __register_hotcpu_notifier(&mce_cpu_notifier);
cpu_notifier_register_done();
goto err_device_create;
}
@@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
err_register:
unregister_syscore_ops(&mce_syscore_ops);
- cpu_notifier_register_begin();
- __unregister_hotcpu_notifier(&mce_cpu_notifier);
- cpu_notifier_register_done();
-
err_device_create:
/*
* We didn't keep track of which devices were created above, but
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2bdfbff8a4f6..2879ecdaac43 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
continue;
if (event->attr.config1 & ~er->valid_mask)
return -EINVAL;
+ /* Check if the extra MSRs can be safely accessed */
+ if (!er->extra_msr_access)
+ return -ENXIO;
reg->idx = er->idx;
reg->config = event->attr.config1;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 3b2f9bdd974b..8ade93111e03 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -295,14 +295,16 @@ struct extra_reg {
u64 config_mask;
u64 valid_mask;
int idx; /* per_xxx->regs[] reg index */
+ bool extra_msr_access;
};
#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
- .event = (e), \
- .msr = (ms), \
- .config_mask = (m), \
- .valid_mask = (vm), \
- .idx = EXTRA_REG_##i, \
+ .event = (e), \
+ .msr = (ms), \
+ .config_mask = (m), \
+ .valid_mask = (vm), \
+ .idx = EXTRA_REG_##i, \
+ .extra_msr_access = true, \
}
#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index adb02aa62af5..2502d0d9d246 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1382,6 +1382,15 @@ again:
intel_pmu_lbr_read();
/*
+ * CondChgd bit 63 doesn't mean any overflow status. Ignore
+ * and clear the bit.
+ */
+ if (__test_and_clear_bit(63, (unsigned long *)&status)) {
+ if (!status)
+ goto done;
+ }
+
+ /*
* PEBS overflow sets bit 62 in the global status register
*/
if (__test_and_clear_bit(62, (unsigned long *)&status)) {
@@ -2173,6 +2182,41 @@ static void intel_snb_check_microcode(void)
}
}
+/*
+ * Under certain circumstances, accessing certain MSRs may cause a #GP.
+ * This function tests whether the input MSR can be safely accessed.
+ */
+static bool check_msr(unsigned long msr, u64 mask)
+{
+ u64 val_old, val_new, val_tmp;
+
+ /*
+ * Read the current value, change it and read it back to see if it
+ * matches, this is needed to detect certain hardware emulators
+ * (qemu/kvm) that don't trap on the MSR access and always return 0s.
+ */
+ if (rdmsrl_safe(msr, &val_old))
+ return false;
+
+ /*
+ * Only change the bits which can be updated by wrmsrl.
+ */
+ val_tmp = val_old ^ mask;
+ if (wrmsrl_safe(msr, val_tmp) ||
+ rdmsrl_safe(msr, &val_new))
+ return false;
+
+ if (val_new != val_tmp)
+ return false;
+
+ /* At this point the MSR is known to be safely accessible.
+ * Restore the old value and return.
+ */
+ wrmsrl(msr, val_old);
+
+ return true;
+}
+
static __init void intel_sandybridge_quirk(void)
{
x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -2262,7 +2306,8 @@ __init int intel_pmu_init(void)
union cpuid10_ebx ebx;
struct event_constraint *c;
unsigned int unused;
- int version;
+ struct extra_reg *er;
+ int version, i;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
switch (boot_cpu_data.x86) {
@@ -2465,6 +2510,9 @@ __init int intel_pmu_init(void)
case 62: /* IvyBridge EP */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
+ /* dTLB-load-misses on IVB is different than SNB */
+ hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
+
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
@@ -2565,6 +2613,34 @@ __init int intel_pmu_init(void)
}
}
+ /*
+ * Accessing the LBR MSRs may cause a #GP under certain circumstances,
+ * e.g. KVM doesn't support the LBR MSRs.
+ * Check all LBR MSRs here.
+ * Disable LBR access if any LBR MSR cannot be accessed.
+ */
+ if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+ x86_pmu.lbr_nr = 0;
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+ check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
+ x86_pmu.lbr_nr = 0;
+ }
+
+ /*
+ * Accessing an extra MSR may cause a #GP under certain circumstances,
+ * e.g. KVM doesn't support offcore events.
+ * Check all extra_regs here.
+ */
+ if (x86_pmu.extra_regs) {
+ for (er = x86_pmu.extra_regs; er->msr; er++) {
+ er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
+ /* Disable LBR select mapping */
+ if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
+ x86_pmu.lbr_sel_map = NULL;
+ }
+ }
+
/* Support full width counters using alternative MSR range */
if (x86_pmu.intel_cap.full_width_write) {
x86_pmu.max_period = x86_pmu.cntval_mask;
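
check_msr() probes by flipping the writable bits, reading back, and requiring the change to stick; emulators that silently swallow the write and always return 0 fail the comparison, and the feature is disabled. A self-contained sketch of the probe against a fake MSR, with rd()/wr() standing in for rdmsrl_safe()/wrmsrl_safe():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_msr;			/* the "hardware" */
static int rd(uint64_t *v) { *v = fake_msr; return 0; }
static int wr(uint64_t v)  { fake_msr = v;  return 0; }

/* Same shape as check_msr(): read, XOR the writable bits, write,
 * read back, and require the change to have stuck. */
static bool probe_msr(uint64_t mask)
{
	uint64_t old, tmp, new;

	if (rd(&old))
		return false;
	tmp = old ^ mask;
	if (wr(tmp) || rd(&new) || new != tmp)
		return false;
	wr(old);	/* restore before reporting success */
	return true;
}

int main(void)
{
	fake_msr = 0x5;
	printf("accessible=%d\n", probe_msr(0x3));
	return 0;
}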
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 980970cb744d..696ade311ded 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
if (!x86_pmu.bts)
return 0;
- buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
- if (unlikely(!buffer))
+ buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
+ if (unlikely(!buffer)) {
+ WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
return -ENOMEM;
+ }
max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
thresh = max / 16;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 65bbbea38b9c..ae6552a0701f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+
SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index dbaa23e78b36..0d0c9d4ab6d5 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -425,8 +425,8 @@ sysenter_do_call:
cmpl $(NR_syscalls), %eax
jae sysenter_badsys
call *sys_call_table(,%eax,4)
- movl %eax,PT_EAX(%esp)
sysenter_after_call:
+ movl %eax,PT_EAX(%esp)
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
@@ -502,6 +502,7 @@ ENTRY(system_call)
jae syscall_badsys
syscall_call:
call *sys_call_table(,%eax,4)
+syscall_after_call:
movl %eax,PT_EAX(%esp) # store the return value
syscall_exit:
LOCKDEP_SYS_EXIT
@@ -675,12 +676,12 @@ syscall_fault:
END(syscall_fault)
syscall_badsys:
- movl $-ENOSYS,PT_EAX(%esp)
- jmp syscall_exit
+ movl $-ENOSYS,%eax
+ jmp syscall_after_call
END(syscall_badsys)
sysenter_badsys:
- movl $-ENOSYS,PT_EAX(%esp)
+ movl $-ENOSYS,%eax
jmp sysenter_after_call
END(sysenter_badsys)
CFI_ENDPROC
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b25ca969edd2..c844f0816ab8 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -830,27 +830,24 @@ restore_args:
RESTORE_ARGS 1,8,1
irq_return:
+ INTERRUPT_RETURN
+
+ENTRY(native_iret)
/*
* Are we returning to a stack segment from the LDT? Note: in
* 64-bit mode SS:RSP on the exception stack is always valid.
*/
#ifdef CONFIG_X86_ESPFIX64
testb $4,(SS-RIP)(%rsp)
- jnz irq_return_ldt
+ jnz native_irq_return_ldt
#endif
-irq_return_iret:
- INTERRUPT_RETURN
- _ASM_EXTABLE(irq_return_iret, bad_iret)
-
-#ifdef CONFIG_PARAVIRT
-ENTRY(native_iret)
+native_irq_return_iret:
iretq
- _ASM_EXTABLE(native_iret, bad_iret)
-#endif
+ _ASM_EXTABLE(native_irq_return_iret, bad_iret)
#ifdef CONFIG_X86_ESPFIX64
-irq_return_ldt:
+native_irq_return_ldt:
pushq_cfi %rax
pushq_cfi %rdi
SWAPGS
@@ -872,7 +869,7 @@ irq_return_ldt:
SWAPGS
movq %rax,%rsp
popq_cfi %rax
- jmp irq_return_iret
+ jmp native_irq_return_iret
#endif
.section .fixup,"ax"
@@ -956,13 +953,8 @@ __do_double_fault:
cmpl $__KERNEL_CS,CS(%rdi)
jne do_double_fault
movq RIP(%rdi),%rax
- cmpq $irq_return_iret,%rax
-#ifdef CONFIG_PARAVIRT
- je 1f
- cmpq $native_iret,%rax
-#endif
+ cmpq $native_irq_return_iret,%rax
jne do_double_fault /* This shouldn't happen... */
-1:
movq PER_CPU_VAR(kernel_stack),%rax
subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
movq %rax,RSP(%rdi)
@@ -1428,7 +1420,7 @@ error_sti:
*/
error_kernelspace:
incl %ebx
- leaq irq_return_iret(%rip),%rcx
+ leaq native_irq_return_iret(%rip),%rcx
cmpq %rcx,RIP+8(%rsp)
je error_swapgs
movl %ecx,%eax /* zero extend */
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 6afbb16e9b79..94d857fb1033 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -175,7 +175,7 @@ void init_espfix_ap(void)
if (!pud_present(pud)) {
pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
- paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+ paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
for (n = 0; n < ESPFIX_PUD_CLONES; n++)
set_pud(&pud_p[n], pud);
}
@@ -185,7 +185,7 @@ void init_espfix_ap(void)
if (!pmd_present(pmd)) {
pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
- paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+ paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
for (n = 0; n < ESPFIX_PMD_CLONES; n++)
set_pmd(&pmd_p[n], pmd);
}
@@ -193,7 +193,6 @@ void init_espfix_ap(void)
pte_p = pte_offset_kernel(&pmd, addr);
stack_page = (void *)__get_free_page(GFP_KERNEL);
pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
- paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
for (n = 0; n < ESPFIX_PTE_CLONES; n++)
set_pte(&pte_p[n*PTE_STRIDE], pte);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7596df664901..67e6d19ef1be 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
struct kprobe *p;
struct kprobe_ctlblk *kcb;
+ if (user_mode_vm(regs))
+ return 0;
+
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
/*
* We don't want to be preempted for the entire
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 3f08f34f93eb..a1da6737ba5b 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -6,7 +6,6 @@ DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(pv_cpu_ops, iret, "iretq");
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
@@ -50,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
PATCH_SITE(pv_irq_ops, save_fl);
PATCH_SITE(pv_irq_ops, irq_enable);
PATCH_SITE(pv_irq_ops, irq_disable);
- PATCH_SITE(pv_cpu_ops, iret);
PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
PATCH_SITE(pv_cpu_ops, usergs_sysret32);
PATCH_SITE(pv_cpu_ops, usergs_sysret64);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 57e5ce126d5a..ea030319b321 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -920,9 +920,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
mark_tsc_unstable("cpufreq changes");
- }
- set_cyc2ns_scale(tsc_khz, freq->cpu);
+ set_cyc2ns_scale(tsc_khz, freq->cpu);
+ }
return 0;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f6449334ec45..ef432f891d30 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
kvm_x86_ops->set_nmi(vcpu);
}
} else if (kvm_cpu_has_injectable_intr(vcpu)) {
+ /*
+ * Because interrupts can be injected asynchronously, we are
+ * calling check_nested_events again here to avoid a race condition.
+ * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
+ * proposal and current concerns. Perhaps we should be setting
+ * KVM_REQ_EVENT only on certain events and not unconditionally?
+ */
+ if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+ r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+ if (r != 0)
+ return r;
+ }
if (kvm_x86_ops->interrupt_allowed(vcpu)) {
kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
false);
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
index df95a2fdff73..11b65d4f9414 100644
--- a/arch/x86/vdso/vdso2c.h
+++ b/arch/x86/vdso/vdso2c.h
@@ -93,6 +93,9 @@ static void BITSFUNC(copy_section)(struct BITSFUNC(fake_sections) *out,
uint64_t flags = GET_LE(&in->sh_flags);
bool copy = flags & SHF_ALLOC &&
+ (GET_LE(&in->sh_size) ||
+ (GET_LE(&in->sh_type) != SHT_RELA &&
+ GET_LE(&in->sh_type) != SHT_REL)) &&
strcmp(name, ".altinstructions") &&
strcmp(name, ".altinstr_replacement");
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index e1513c47872a..5a5176de8d0a 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -62,6 +62,9 @@ struct linux_binprm;
Only used for the 64-bit and x32 vdsos. */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
+#ifdef CONFIG_X86_32
+ return 0;
+#else
unsigned long addr, end;
unsigned offset;
end = (start + PMD_SIZE - 1) & PMD_MASK;
@@ -83,6 +86,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
addr = align_vdso_addr(addr);
return addr;
+#endif
}
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index c98583588580..ebfa9b2c871d 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -36,99 +36,133 @@
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
+#include <xen/xen.h>
#include <asm/pgtable.h>
-static int map_pte_fn(pte_t *pte, struct page *pmd_page,
- unsigned long addr, void *data)
+static struct gnttab_vm_area {
+ struct vm_struct *area;
+ pte_t **ptes;
+} gnttab_shared_vm_area, gnttab_status_vm_area;
+
+int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
+ unsigned long max_nr_gframes,
+ void **__shared)
{
- unsigned long **frames = (unsigned long **)data;
+ void *shared = *__shared;
+ unsigned long addr;
+ unsigned long i;
- set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
- (*frames)++;
- return 0;
-}
+ if (shared == NULL)
+ *__shared = shared = gnttab_shared_vm_area.area->addr;
-/*
- * This function is used to map shared frames to store grant status. It is
- * different from map_pte_fn above, the frames type here is uint64_t.
- */
-static int map_pte_fn_status(pte_t *pte, struct page *pmd_page,
- unsigned long addr, void *data)
-{
- uint64_t **frames = (uint64_t **)data;
+ addr = (unsigned long)shared;
+
+ for (i = 0; i < nr_gframes; i++) {
+ set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],
+ mfn_pte(frames[i], PAGE_KERNEL));
+ addr += PAGE_SIZE;
+ }
- set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
- (*frames)++;
return 0;
}
-static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
- unsigned long addr, void *data)
+int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
+ unsigned long max_nr_gframes,
+ grant_status_t **__shared)
{
+ grant_status_t *shared = *__shared;
+ unsigned long addr;
+ unsigned long i;
+
+ if (shared == NULL)
+ *__shared = shared = gnttab_status_vm_area.area->addr;
+
+ addr = (unsigned long)shared;
+
+ for (i = 0; i < nr_gframes; i++) {
+ set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],
+ mfn_pte(frames[i], PAGE_KERNEL));
+ addr += PAGE_SIZE;
+ }
- set_pte_at(&init_mm, addr, pte, __pte(0));
return 0;
}
-int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
- unsigned long max_nr_gframes,
- void **__shared)
+void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
{
- int rc;
- void *shared = *__shared;
+ pte_t **ptes;
+ unsigned long addr;
+ unsigned long i;
- if (shared == NULL) {
- struct vm_struct *area =
- alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
- BUG_ON(area == NULL);
- shared = area->addr;
- *__shared = shared;
- }
+ if (shared == gnttab_status_vm_area.area->addr)
+ ptes = gnttab_status_vm_area.ptes;
+ else
+ ptes = gnttab_shared_vm_area.ptes;
- rc = apply_to_page_range(&init_mm, (unsigned long)shared,
- PAGE_SIZE * nr_gframes,
- map_pte_fn, &frames);
- return rc;
+ addr = (unsigned long)shared;
+
+ for (i = 0; i < nr_gframes; i++) {
+ set_pte_at(&init_mm, addr, ptes[i], __pte(0));
+ addr += PAGE_SIZE;
+ }
}
-int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
- unsigned long max_nr_gframes,
- grant_status_t **__shared)
+static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
{
- int rc;
- grant_status_t *shared = *__shared;
+ area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
+ if (area->ptes == NULL)
+ return -ENOMEM;
- if (shared == NULL) {
- /* No need to pass in PTE as we are going to do it
- * in apply_to_page_range anyhow. */
- struct vm_struct *area =
- alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
- BUG_ON(area == NULL);
- shared = area->addr;
- *__shared = shared;
+ area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes);
+ if (area->area == NULL) {
+ kfree(area->ptes);
+ return -ENOMEM;
}
- rc = apply_to_page_range(&init_mm, (unsigned long)shared,
- PAGE_SIZE * nr_gframes,
- map_pte_fn_status, &frames);
- return rc;
+ return 0;
}
-void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
+static void arch_gnttab_vfree(struct gnttab_vm_area *area)
+{
+ free_vm_area(area->area);
+ kfree(area->ptes);
+}
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
{
- apply_to_page_range(&init_mm, (unsigned long)shared,
- PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL);
+ int ret;
+
+ if (!xen_pv_domain())
+ return 0;
+
+ ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Always allocate the space for the status frames in case
+ * we're migrated to a host with V2 support.
+ */
+ ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+ err:
+ arch_gnttab_vfree(&gnttab_shared_vm_area);
+ return -ENOMEM;
}
+
#ifdef CONFIG_XEN_PVH
#include <xen/balloon.h>
#include <xen/events.h>
-#include <xen/xen.h>
#include <linux/slab.h>
static int __init xlated_setup_gnttab_pages(void)
{
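
The rewrite above replaces the old per-call apply_to_page_range() walks with vm areas whose PTE pointers are captured once at init: alloc_vm_area() fills an array with a pointer to each page's PTE, so later (re)mapping is just set_pte_at() on the saved entries. A minimal sketch of that capture-once pattern, error handling omitted, for a caller mapping nr frames:

	pte_t **ptes = kmalloc(sizeof(pte_t *) * nr, GFP_KERNEL);
	struct vm_struct *area = alloc_vm_area(PAGE_SIZE * nr, ptes);
	unsigned long addr = (unsigned long)area->addr;
	unsigned long i;

	for (i = 0; i < nr; i++, addr += PAGE_SIZE)
		set_pte_at(&init_mm, addr, ptes[i],	/* no page-table walk */
			   mfn_pte(frames[i], PAGE_KERNEL));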
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index f9e1ec346e35..8453e6e39895 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
beqz a2, 1f # if at start of vector, don't restore
addi a0, a0, -128
- bbsi a0, 8, 1f # don't restore except for overflow 8 and 12
- bbsi a0, 7, 2f
+ bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
+
+ /*
+ * This fixup handler is for the extremely unlikely case where the
+ * overflow handler's reference thru a0 gets a hardware TLB refill
+ * that bumps out the (distinct, aliasing) TLB entry that mapped its
+ * prior references thru a9/a13, and where our reference now thru
+ * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
+ */
+ movi a2, window_overflow_restore_a0_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+
+ bbsi.l a0, 7, 2f
/*
* Restore a0 as saved by _WindowOverflow8().
- *
- * FIXME: we really need a fixup handler for this L32E,
- * for the extremely unlikely case where the overflow handler's
- * reference thru a0 gets a hardware TLB refill that bumps out
- * the (distinct, aliasing) TLB entry that mapped its prior
- * references thru a9, and where our reference now thru a9
- * gets a 2nd-level miss exception (not hardware TLB refill).
*/
- l32e a2, a9, -16
- wsr a2, depc # replace the saved a0
- j 1f
+ l32e a0, a9, -16
+ wsr a0, depc # replace the saved a0
+ j 3f
2:
/*
* Restore a0 as saved by _WindowOverflow12().
- *
- * FIXME: we really need a fixup handler for this L32E,
- * for the extremely unlikely case where the overflow handler's
- * reference thru a0 gets a hardware TLB refill that bumps out
- * the (distinct, aliasing) TLB entry that mapped its prior
- * references thru a13, and where our reference now thru a13
- * gets a 2nd-level miss exception (not hardware TLB refill).
*/
- l32e a2, a13, -16
- wsr a2, depc # replace the saved a0
+ l32e a0, a13, -16
+ wsr a0, depc # replace the saved a0
+3:
+ xsr a3, excsave1
+ movi a0, 0
+ s32i a0, a3, EXC_TABLE_FIXUP
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
1:
/*
* Restore WindowBase while leaving all address registers restored.
@@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
s32i a0, a2, PT_DEPC
+_DoubleExceptionVector_handle_exception:
addx4 a0, a0, a3
l32i a0, a0, EXC_TABLE_FAST_USER
xsr a3, excsave1
@@ -464,11 +469,120 @@ _DoubleExceptionVector_WindowOverflow:
rotw -3
j 1b
- .end literal_prefix
ENDPROC(_DoubleExceptionVector)
/*
+ * Fixup handler for TLB miss in double exception handler for window overflow.
+ * We get here with windowbase set to the window that was being spilled and
+ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
+ * (bit set) window.
+ *
+ * We do the following here:
+ * - go to the original window retaining a0 value;
+ * - set up exception stack to return back to appropriate a0 restore code
+ * (we'll need to rotate window back and there's no place to save this
+ * information, use different return address for that);
+ * - handle the exception;
+ * - go to the window that was being spilled;
+ * - set up window_overflow_restore_a0_fixup as a fixup routine;
+ * - reload a0;
+ * - restore the original window;
+ * - reset the default fixup routine;
+ * - return to user. By the time we get to this fixup handler all information
+ * about the conditions of the original double exception that happened in
+ * the window overflow handler is lost, so we just return to userspace to
+ * retry overflow from start.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(window_overflow_restore_a0_fixup)
+
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ rsr a2, windowbase
+ sub a0, a2, a0
+ extui a0, a0, 0, 3
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+
+ _beqi a0, 1, .Lhandle_1
+ _beqi a0, 3, .Lhandle_3
+
+ .macro overflow_fixup_handle_exception_pane n
+
+ rsr a0, depc
+ rotw -\n
+
+ xsr a3, excsave1
+ wsr a2, depc
+ l32i a2, a3, EXC_TABLE_KSTK
+ s32i a0, a2, PT_AREG0
+
+ movi a0, .Lrestore_\n
+ s32i a0, a2, PT_DEPC
+ rsr a0, exccause
+ j _DoubleExceptionVector_handle_exception
+
+ .endm
+
+ overflow_fixup_handle_exception_pane 2
+.Lhandle_1:
+ overflow_fixup_handle_exception_pane 1
+.Lhandle_3:
+ overflow_fixup_handle_exception_pane 3
+
+ .macro overflow_fixup_restore_a0_pane n
+
+ rotw \n
+	/* Preserve the a0 value here so we can handle an exception that
+	 * may occur while reloading a0 from the stack. This can happen
+	 * because the TLB miss handler may not be atomic and the page
+	 * table pointer may be lost before we get here. There are no
+	 * free registers, so we use the EXC_TABLE_DOUBLE_SAVE area.
+	 */
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ movi a2, window_overflow_restore_a0_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ bbsi.l a0, 7, 1f
+ l32e a0, a9, -16
+ j 2f
+1:
+ l32e a0, a13, -16
+2:
+ rotw -\n
+
+ .endm
+
+.Lrestore_2:
+ overflow_fixup_restore_a0_pane 2
+
+.Lset_default_fixup:
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ movi a2, 0
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ rfe
+
+.Lrestore_1:
+ overflow_fixup_restore_a0_pane 1
+ j .Lset_default_fixup
+.Lrestore_3:
+ overflow_fixup_restore_a0_pane 3
+ j .Lset_default_fixup
+
+ENDPROC(window_overflow_restore_a0_fixup)
+
+ .end literal_prefix
+/*
* Debug interrupt vector
*
* There is not much space here, so simply jump to another handler.
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index ee32c0085dff..d16db6df86f8 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -269,13 +269,13 @@ SECTIONS
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal,
- DOUBLEEXC_VECTOR_VADDR - 16,
+ DOUBLEEXC_VECTOR_VADDR - 40,
SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
DOUBLEEXC_VECTOR_VADDR,
- 32,
+ 40,
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 4224256bb215..77ed20209ca5 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
return -EINVAL;
}
- if (it && start - it->start < bank_sz) {
+ if (it && start - it->start <= bank_sz) {
if (start == it->start) {
if (end - it->start < bank_sz) {
it->start = end;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b9f4cc494ece..28d227c5ca77 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
{
lockdep_assert_held(q->queue_lock);
+ /*
+ * @q could be exiting and already have destroyed all blkgs as
+ * indicated by NULL root_blkg. If so, don't confuse policies.
+ */
+ if (!q->root_blkg)
+ return;
+
blk_throtl_drain(q);
}
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 3f33d8672268..a185b86741e5 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
EXPORT_SYMBOL(blk_queue_find_tag);
/**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free
*
- * Tries to free the specified @bqt. Returns true if it was
- * actually freed and false if there are still references using it
+ * Drop the reference count on @bqt and frees it when the last reference
+ * is dropped.
*/
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
{
- int retval;
-
- retval = atomic_dec_and_test(&bqt->refcnt);
- if (retval) {
+ if (atomic_dec_and_test(&bqt->refcnt)) {
BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
bqt->max_depth);
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
kfree(bqt);
}
-
- return retval;
}
+EXPORT_SYMBOL(blk_free_tags);
/**
* __blk_queue_free_tags - release tag maintenance info
@@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q)
if (!bqt)
return;
- __blk_free_tags(bqt);
+ blk_free_tags(bqt);
q->queue_tags = NULL;
queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
/**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt: the tag map to free
- *
- * For externally managed @bqt frees the map. Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
- if (unlikely(!__blk_free_tags(bqt)))
- BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
-/**
* blk_queue_free_tags - release tag maintenance info
* @q: the request queue for the device
*
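
After this refactor blk_free_tags() is the single place the reference count drops, and the last holder frees the map. It is the standard atomic_dec_and_test() idiom, sketched generically:

struct tagmap {
	atomic_t refcnt;
	/* ... payload ... */
};

static void tagmap_put(struct tagmap *t)
{
	/* atomic_dec_and_test() returns true only for the final reference */
	if (atomic_dec_and_test(&t->refcnt))
		kfree(t);
}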
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index fbd5a67cb773..a0926a6094b2 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKROSET:
case BLKDISCARD:
case BLKSECDISCARD:
+ case BLKZEROOUT:
/*
* the ones below are implemented in blkdev_locked_ioctl,
* but we call blkdev_ioctl, which gets the lock for us
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 966f893711b3..6a3ad8011585 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/net.h>
#include <linux/rwsem.h>
+#include <linux/security.h>
struct alg_type_list {
const struct af_alg_type *type;
@@ -243,6 +244,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
sock_init_data(newsock, sk2);
sock_graft(sk2, newsock);
+ security_sk_clone(sk, sk2);
err = type->accept(ask->private, sk2);
if (err) {
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index c67f6f5ad611..36b0e61f9c09 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -30,6 +30,10 @@
#include <linux/types.h>
#include <linux/dmi.h>
#include <linux/delay.h>
+#ifdef CONFIG_ACPI_PROCFS_POWER
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#endif
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/acpi.h>
@@ -52,6 +56,7 @@ MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI AC Adapter Driver");
MODULE_LICENSE("GPL");
+
static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device);
static void acpi_ac_notify(struct acpi_device *device, u32 event);
@@ -67,6 +72,13 @@ static int acpi_ac_resume(struct device *dev);
#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+extern struct proc_dir_entry *acpi_lock_ac_dir(void);
+extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
+static int acpi_ac_open_fs(struct inode *inode, struct file *file);
+#endif
+
static int ac_sleep_before_get_state_ms;
static struct acpi_driver acpi_ac_driver = {
@@ -91,6 +103,16 @@ struct acpi_ac {
#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
+#ifdef CONFIG_ACPI_PROCFS_POWER
+static const struct file_operations acpi_ac_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_ac_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
/* --------------------------------------------------------------------------
AC Adapter Management
-------------------------------------------------------------------------- */
@@ -143,6 +165,83 @@ static enum power_supply_property ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
+#ifdef CONFIG_ACPI_PROCFS_POWER
+/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+
+static struct proc_dir_entry *acpi_ac_dir;
+
+static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_ac *ac = seq->private;
+
+ if (!ac)
+ return 0;
+
+ if (acpi_ac_get_state(ac)) {
+ seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
+ return 0;
+ }
+
+ seq_puts(seq, "state: ");
+ switch (ac->state) {
+ case ACPI_AC_STATUS_OFFLINE:
+ seq_puts(seq, "off-line\n");
+ break;
+ case ACPI_AC_STATUS_ONLINE:
+ seq_puts(seq, "on-line\n");
+ break;
+ default:
+ seq_puts(seq, "unknown\n");
+ break;
+ }
+
+ return 0;
+}
+
+static int acpi_ac_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
+}
+
+static int acpi_ac_add_fs(struct acpi_ac *ac)
+{
+ struct proc_dir_entry *entry = NULL;
+
+ printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
+ " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
+ if (!acpi_device_dir(ac->device)) {
+ acpi_device_dir(ac->device) =
+ proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir);
+ if (!acpi_device_dir(ac->device))
+ return -ENODEV;
+ }
+
+ /* 'state' [R] */
+ entry = proc_create_data(ACPI_AC_FILE_STATE,
+ S_IRUGO, acpi_device_dir(ac->device),
+ &acpi_ac_fops, ac);
+ if (!entry)
+ return -ENODEV;
+ return 0;
+}
+
+static int acpi_ac_remove_fs(struct acpi_ac *ac)
+{
+
+ if (acpi_device_dir(ac->device)) {
+ remove_proc_entry(ACPI_AC_FILE_STATE,
+ acpi_device_dir(ac->device));
+ remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir);
+ acpi_device_dir(ac->device) = NULL;
+ }
+
+ return 0;
+}
+#endif
+
/* --------------------------------------------------------------------------
Driver Model
-------------------------------------------------------------------------- */
@@ -243,6 +342,11 @@ static int acpi_ac_add(struct acpi_device *device)
goto end;
ac->charger.name = acpi_device_bid(device);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ result = acpi_ac_add_fs(ac);
+ if (result)
+ goto end;
+#endif
ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
ac->charger.properties = ac_props;
ac->charger.num_properties = ARRAY_SIZE(ac_props);
@@ -258,8 +362,12 @@ static int acpi_ac_add(struct acpi_device *device)
ac->battery_nb.notifier_call = acpi_ac_battery_notify;
register_acpi_notifier(&ac->battery_nb);
end:
- if (result)
+ if (result) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_ac_remove_fs(ac);
+#endif
kfree(ac);
+ }
dmi_check_system(ac_dmi_table);
return result;
@@ -303,6 +411,10 @@ static int acpi_ac_remove(struct acpi_device *device)
power_supply_unregister(&ac->charger);
unregister_acpi_notifier(&ac->battery_nb);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_ac_remove_fs(ac);
+#endif
+
kfree(ac);
return 0;
@@ -315,9 +427,20 @@ static int __init acpi_ac_init(void)
if (acpi_disabled)
return -ENODEV;
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_ac_dir = acpi_lock_ac_dir();
+ if (!acpi_ac_dir)
+ return -ENODEV;
+#endif
+
result = acpi_bus_register_driver(&acpi_ac_driver);
- if (result < 0)
+ if (result < 0) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
return -ENODEV;
+ }
return 0;
}
@@ -325,6 +448,9 @@ static int __init acpi_ac_init(void)
static void __exit acpi_ac_exit(void)
{
acpi_bus_unregister_driver(&acpi_ac_driver);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
}
module_init(acpi_ac_init);
module_exit(acpi_ac_exit);
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 6703c1fd993a..4ddb0dca56f6 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -14,6 +14,8 @@
#include <linux/module.h>
static const struct acpi_device_id acpi_pnp_device_ids[] = {
+ /* soc_button_array */
+ {"PNP0C40"},
/* pata_isapnp */
{"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */
/* floppy */
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 0d7116f34b95..130f513e08c9 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/suspend.h>
+#include <linux/delay.h>
#include <asm/unaligned.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
@@ -534,6 +535,20 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
" invalid.\n");
}
+ /*
+ * When fully charged, some batteries wrongly report
+	 * capacity_now = design_capacity instead of
+	 * capacity_now = full_charge_capacity
+ */
+ if (battery->capacity_now > battery->full_charge_capacity
+ && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) {
+ battery->capacity_now = battery->full_charge_capacity;
+ if (battery->capacity_now != battery->design_capacity)
+ printk_once(KERN_WARNING FW_BUG
+ "battery: reported current charge level (%d) "
+ "is higher than reported maximum charge level (%d).\n",
+ battery->capacity_now, battery->full_charge_capacity);
+ }
+
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
&& battery->capacity_now >= 0 && battery->capacity_now <= 100)
battery->capacity_now = (battery->capacity_now *
@@ -1151,6 +1166,28 @@ static struct dmi_system_id bat_dmi_table[] = {
{},
};
+/*
+ * Some machines' ECs (e.g. the Lenovo Z480) are not stable during
+ * boot, so the battery driver sometimes fails to probe because it
+ * cannot read battery information from the EC. After several
+ * retries the operation may succeed, so retry here with a 20ms
+ * sleep between attempts.
+ */
+static int acpi_battery_update_retry(struct acpi_battery *battery)
+{
+ int retry, ret;
+
+ for (retry = 5; retry; retry--) {
+ ret = acpi_battery_update(battery, false);
+ if (!ret)
+ break;
+
+ msleep(20);
+ }
+ return ret;
+}
+
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
@@ -1169,9 +1206,11 @@ static int acpi_battery_add(struct acpi_device *device)
mutex_init(&battery->sysfs_lock);
if (acpi_has_method(battery->device->handle, "_BIX"))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
- result = acpi_battery_update(battery, false);
+
+ result = acpi_battery_update_retry(battery);
if (result)
goto fail;
+
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
#endif
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ad11ba4a412d..a66ab658abbc 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1,11 +1,14 @@
/*
- * ec.c - ACPI Embedded Controller Driver (v2.1)
+ * ec.c - ACPI Embedded Controller Driver (v2.2)
*
- * Copyright (C) 2006-2008 Alexey Starikovskiy <[email protected]>
- * Copyright (C) 2006 Denis Sadykov <[email protected]>
- * Copyright (C) 2004 Luming Yu <[email protected]>
- * Copyright (C) 2001, 2002 Andy Grover <[email protected]>
- * Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
+ * Copyright (C) 2001-2014 Intel Corporation
+ * Author: 2014 Lv Zheng <[email protected]>
+ * 2006, 2007 Alexey Starikovskiy <[email protected]>
+ * 2006 Denis Sadykov <[email protected]>
+ * 2004 Luming Yu <[email protected]>
+ * 2001, 2002 Andy Grover <[email protected]>
+ * 2001, 2002 Paul Diefenbaugh <[email protected]>
+ * Copyright (C) 2008 Alexey Starikovskiy <[email protected]>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -52,6 +55,7 @@
/* EC status register */
#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
+#define ACPI_EC_FLAG_CMD 0x08 /* Input buffer contains a command */
#define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
#define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
@@ -78,6 +82,9 @@ enum {
EC_FLAGS_BLOCKED, /* Transactions are blocked */
};
+#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
+#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
+
/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
@@ -109,7 +116,7 @@ struct transaction {
u8 ri;
u8 wlen;
u8 rlen;
- bool done;
+ u8 flags;
};
struct acpi_ec *boot_ec, *first_ec;
@@ -127,83 +134,104 @@ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
u8 x = inb(ec->command_addr);
- pr_debug("---> status = 0x%2.2x\n", x);
+ pr_debug("EC_SC(R) = 0x%2.2x "
+ "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n",
+ x,
+ !!(x & ACPI_EC_FLAG_SCI),
+ !!(x & ACPI_EC_FLAG_BURST),
+ !!(x & ACPI_EC_FLAG_CMD),
+ !!(x & ACPI_EC_FLAG_IBF),
+ !!(x & ACPI_EC_FLAG_OBF));
return x;
}
static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
u8 x = inb(ec->data_addr);
- pr_debug("---> data = 0x%2.2x\n", x);
+ pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
return x;
}
static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
- pr_debug("<--- command = 0x%2.2x\n", command);
+ pr_debug("EC_SC(W) = 0x%2.2x\n", command);
outb(command, ec->command_addr);
}
static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
- pr_debug("<--- data = 0x%2.2x\n", data);
+ pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
outb(data, ec->data_addr);
}
-static int ec_transaction_done(struct acpi_ec *ec)
+static int ec_transaction_completed(struct acpi_ec *ec)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&ec->lock, flags);
- if (!ec->curr || ec->curr->done)
+ if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
ret = 1;
spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
-static void start_transaction(struct acpi_ec *ec)
+static bool advance_transaction(struct acpi_ec *ec)
{
- ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
- ec->curr->done = false;
- acpi_ec_write_cmd(ec, ec->curr->command);
-}
-
-static void advance_transaction(struct acpi_ec *ec, u8 status)
-{
- unsigned long flags;
struct transaction *t;
+ u8 status;
+ bool wakeup = false;
- spin_lock_irqsave(&ec->lock, flags);
+ pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK");
+ status = acpi_ec_read_status(ec);
t = ec->curr;
if (!t)
- goto unlock;
- if (t->wlen > t->wi) {
- if ((status & ACPI_EC_FLAG_IBF) == 0)
- acpi_ec_write_data(ec,
- t->wdata[t->wi++]);
- else
- goto err;
- } else if (t->rlen > t->ri) {
- if ((status & ACPI_EC_FLAG_OBF) == 1) {
- t->rdata[t->ri++] = acpi_ec_read_data(ec);
- if (t->rlen == t->ri)
- t->done = true;
+ goto err;
+ if (t->flags & ACPI_EC_COMMAND_POLL) {
+ if (t->wlen > t->wi) {
+ if ((status & ACPI_EC_FLAG_IBF) == 0)
+ acpi_ec_write_data(ec, t->wdata[t->wi++]);
+ else
+ goto err;
+ } else if (t->rlen > t->ri) {
+ if ((status & ACPI_EC_FLAG_OBF) == 1) {
+ t->rdata[t->ri++] = acpi_ec_read_data(ec);
+ if (t->rlen == t->ri) {
+ t->flags |= ACPI_EC_COMMAND_COMPLETE;
+ wakeup = true;
+ }
+ } else
+ goto err;
+ } else if (t->wlen == t->wi &&
+ (status & ACPI_EC_FLAG_IBF) == 0) {
+ t->flags |= ACPI_EC_COMMAND_COMPLETE;
+ wakeup = true;
+ }
+ return wakeup;
+ } else {
+ if ((status & ACPI_EC_FLAG_IBF) == 0) {
+ acpi_ec_write_cmd(ec, t->command);
+ t->flags |= ACPI_EC_COMMAND_POLL;
} else
goto err;
- } else if (t->wlen == t->wi &&
- (status & ACPI_EC_FLAG_IBF) == 0)
- t->done = true;
- goto unlock;
+ return wakeup;
+ }
err:
/*
* If SCI bit is set, then don't think it's a false IRQ
* otherwise will take a not handled IRQ as a false one.
*/
- if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI))
- ++t->irq_count;
+ if (!(status & ACPI_EC_FLAG_SCI)) {
+ if (in_interrupt() && t)
+ ++t->irq_count;
+ }
+ return wakeup;
+}
-unlock:
- spin_unlock_irqrestore(&ec->lock, flags);
+static void start_transaction(struct acpi_ec *ec)
+{
+ ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
+ ec->curr->flags = 0;
+ (void)advance_transaction(ec);
}
static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
@@ -228,15 +256,17 @@ static int ec_poll(struct acpi_ec *ec)
/* don't sleep with disabled interrupts */
if (EC_FLAGS_MSI || irqs_disabled()) {
udelay(ACPI_EC_MSI_UDELAY);
- if (ec_transaction_done(ec))
+ if (ec_transaction_completed(ec))
return 0;
} else {
if (wait_event_timeout(ec->wait,
- ec_transaction_done(ec),
+ ec_transaction_completed(ec),
msecs_to_jiffies(1)))
return 0;
}
- advance_transaction(ec, acpi_ec_read_status(ec));
+ spin_lock_irqsave(&ec->lock, flags);
+ (void)advance_transaction(ec);
+ spin_unlock_irqrestore(&ec->lock, flags);
} while (time_before(jiffies, delay));
pr_debug("controller reset, restart transaction\n");
spin_lock_irqsave(&ec->lock, flags);
@@ -268,23 +298,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
return ret;
}
-static int ec_check_ibf0(struct acpi_ec *ec)
-{
- u8 status = acpi_ec_read_status(ec);
- return (status & ACPI_EC_FLAG_IBF) == 0;
-}
-
-static int ec_wait_ibf0(struct acpi_ec *ec)
-{
- unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
- /* interrupt wait manually if GPE mode is not active */
- while (time_before(jiffies, delay))
- if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
- msecs_to_jiffies(1)))
- return 0;
- return -ETIME;
-}
-
static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
int status;
@@ -305,12 +318,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
goto unlock;
}
}
- if (ec_wait_ibf0(ec)) {
- pr_err("input buffer is not empty, "
- "aborting transaction\n");
- status = -ETIME;
- goto end;
- }
pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
t->command, t->wdata ? t->wdata[0] : 0);
/* disable GPE during transaction if storm is detected */
@@ -334,7 +341,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
}
pr_debug("transaction end\n");
-end:
if (ec->global_lock)
acpi_release_global_lock(glk);
unlock:
@@ -634,17 +640,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
u32 gpe_number, void *data)
{
+ unsigned long flags;
struct acpi_ec *ec = data;
- u8 status = acpi_ec_read_status(ec);
- pr_debug("~~~> interrupt, status:0x%02x\n", status);
-
- advance_transaction(ec, status);
- if (ec_transaction_done(ec) &&
- (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
+ spin_lock_irqsave(&ec->lock, flags);
+ if (advance_transaction(ec))
wake_up(&ec->wait);
- ec_check_sci(ec, acpi_ec_read_status(ec));
- }
+ spin_unlock_irqrestore(&ec->lock, flags);
+ ec_check_sci(ec, acpi_ec_read_status(ec));
return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}
@@ -1066,8 +1069,10 @@ int __init acpi_ec_ecdt_probe(void)
/* fall through */
}
- if (EC_FLAGS_SKIP_DSDT_SCAN)
+ if (EC_FLAGS_SKIP_DSDT_SCAN) {
+ kfree(saved_ec);
return -ENODEV;
+ }
/* This workaround is needed only on some broken machines,
* which require early EC, but fail to provide ECDT */
@@ -1105,6 +1110,7 @@ install:
}
error:
kfree(boot_ec);
+ kfree(saved_ec);
boot_ec = NULL;
return -ENODEV;
}
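
The EC rewrite folds the command-byte write into advance_transaction(), turning each transaction into a small state machine driven under ec->lock from both IRQ and task context: with no flags set, write the command once IBF clears and set ACPI_EC_COMMAND_POLL; then move write/read bytes until everything has transferred and ACPI_EC_COMMAND_COMPLETE wakes the waiter. A hypothetical, hardware-free model of that flow (error paths and spurious-IRQ accounting omitted):

#define CMD_POLL	0x01	/* command accepted, moving data */
#define CMD_COMPLETE	0x02	/* last byte transferred */

struct txn {
	u8 flags;
	u8 wlen, wi;	/* bytes to write / already written */
	u8 rlen, ri;	/* bytes to read / already read */
};

static bool txn_step(struct txn *t, bool ibf, bool obf)
{
	if (!(t->flags & CMD_POLL)) {
		if (!ibf)
			t->flags |= CMD_POLL;	/* command byte written */
	} else if (t->wi < t->wlen) {
		if (!ibf)
			t->wi++;		/* next data byte written */
	} else if (t->ri < t->rlen) {
		if (obf && ++t->ri == t->rlen)
			t->flags |= CMD_COMPLETE;
	} else if (!ibf) {
		t->flags |= CMD_COMPLETE;	/* write-only command done */
	}
	return t->flags & CMD_COMPLETE;		/* true: wake the waiter */
}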
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 0bdacc5e26a3..2ba8f02ced36 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -77,7 +77,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
switch (ares->type) {
case ACPI_RESOURCE_TYPE_MEMORY24:
memory24 = &ares->data.memory24;
- if (!memory24->address_length)
+ if (!memory24->minimum && !memory24->address_length)
return false;
acpi_dev_get_memresource(res, memory24->minimum,
memory24->address_length,
@@ -85,7 +85,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
memory32 = &ares->data.memory32;
- if (!memory32->address_length)
+ if (!memory32->minimum && !memory32->address_length)
return false;
acpi_dev_get_memresource(res, memory32->minimum,
memory32->address_length,
@@ -93,7 +93,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
fixed_memory32 = &ares->data.fixed_memory32;
- if (!fixed_memory32->address_length)
+ if (!fixed_memory32->address && !fixed_memory32->address_length)
return false;
acpi_dev_get_memresource(res, fixed_memory32->address,
fixed_memory32->address_length,
@@ -150,7 +150,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
switch (ares->type) {
case ACPI_RESOURCE_TYPE_IO:
io = &ares->data.io;
- if (!io->address_length)
+ if (!io->minimum && !io->address_length)
return false;
acpi_dev_get_ioresource(res, io->minimum,
io->address_length,
@@ -158,7 +158,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
fixed_io = &ares->data.fixed_io;
- if (!fixed_io->address_length)
+ if (!fixed_io->address && !fixed_io->address_length)
return false;
acpi_dev_get_ioresource(res, fixed_io->address,
fixed_io->address_length,
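
All five hunks in this file apply the same relaxed test: a descriptor is rejected only when both its start and its length are zero, so a zero-length window at a non-zero address is now kept. As a standalone predicate (a restatement of the shared idea, not a kernel API):

static bool acpi_res_usable(u64 start, u64 len)
{
	return start || len;	/* reject only the all-zero descriptor */
}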
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index fb9ffe9adc64..350d52a8f781 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -68,7 +68,7 @@ MODULE_AUTHOR("Bruno Ducrot");
MODULE_DESCRIPTION("ACPI Video Driver");
MODULE_LICENSE("GPL");
-static bool brightness_switch_enabled;
+static bool brightness_switch_enabled = 1;
module_param(brightness_switch_enabled, bool, 0644);
/*
@@ -241,13 +241,14 @@ static bool acpi_video_use_native_backlight(void)
return use_native_backlight_dmi;
}
-static bool acpi_video_verify_backlight_support(void)
+bool acpi_video_verify_backlight_support(void)
{
if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
backlight_device_registered(BACKLIGHT_RAW))
return false;
return acpi_video_backlight_support();
}
+EXPORT_SYMBOL_GPL(acpi_video_verify_backlight_support);
/* backlight device sysfs support */
static int acpi_video_get_brightness(struct backlight_device *bd)
@@ -563,6 +564,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
},
{
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer TravelMate B113",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate B113"),
+ },
+ },
+ {
.callback = video_set_use_native_backlight,
.ident = "HP ProBook 4340s",
.matches = {
@@ -572,6 +581,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
{
.callback = video_set_use_native_backlight,
+ .ident = "HP ProBook 4540s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
.ident = "HP ProBook 2013 models",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 33e3db548a29..c42feb2bacd0 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -166,6 +166,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "Dell Inspiron 5737",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
+ },
+ },
{ },
};
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dae5607e1115..4cd52a4541a9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+ { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
/* Asmedia */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 05882e4445a6..5513296e5e2e 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -371,7 +371,9 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
int pmp, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
+unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
int ahci_stop_engine(struct ata_port *ap);
+void ahci_start_fis_rx(struct ata_port *ap);
void ahci_start_engine(struct ata_port *ap);
int ahci_check_ready(struct ata_link *link);
int ahci_kick_engine(struct ata_port *ap);
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 3a901520c62b..cac4360f272a 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -58,6 +58,8 @@ enum ahci_imx_type {
struct imx_ahci_priv {
struct platform_device *ahci_pdev;
enum ahci_imx_type type;
+ struct clk *sata_clk;
+ struct clk *sata_ref_clk;
struct clk *ahb_clk;
struct regmap *gpr;
bool no_device;
@@ -224,7 +226,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
return ret;
}
- ret = ahci_platform_enable_clks(hpriv);
+ ret = clk_prepare_enable(imxpriv->sata_ref_clk);
if (ret < 0)
goto disable_regulator;
@@ -291,7 +293,7 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv)
!IMX6Q_GPR13_SATA_MPLL_CLK_EN);
}
- ahci_platform_disable_clks(hpriv);
+ clk_disable_unprepare(imxpriv->sata_ref_clk);
if (hpriv->target_pwr)
regulator_disable(hpriv->target_pwr);
@@ -324,6 +326,9 @@ static void ahci_imx_error_handler(struct ata_port *ap)
writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
imx_sata_disable(hpriv);
imxpriv->no_device = true;
+
+ dev_info(ap->dev, "no device found, disabling link.\n");
+ dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX ".hotplug=1 to enable hotplug\n");
}
static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
@@ -385,6 +390,19 @@ static int imx_ahci_probe(struct platform_device *pdev)
imxpriv->no_device = false;
imxpriv->first_time = true;
imxpriv->type = (enum ahci_imx_type)of_id->data;
+
+ imxpriv->sata_clk = devm_clk_get(dev, "sata");
+ if (IS_ERR(imxpriv->sata_clk)) {
+ dev_err(dev, "can't get sata clock.\n");
+ return PTR_ERR(imxpriv->sata_clk);
+ }
+
+ imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
+ if (IS_ERR(imxpriv->sata_ref_clk)) {
+ dev_err(dev, "can't get sata_ref clock.\n");
+ return PTR_ERR(imxpriv->sata_ref_clk);
+ }
+
imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(imxpriv->ahb_clk)) {
dev_err(dev, "can't get ahb clock.\n");
@@ -407,10 +425,14 @@ static int imx_ahci_probe(struct platform_device *pdev)
hpriv->plat_data = imxpriv;
- ret = imx_sata_enable(hpriv);
+ ret = clk_prepare_enable(imxpriv->sata_clk);
if (ret)
return ret;
+ ret = imx_sata_enable(hpriv);
+ if (ret)
+ goto disable_clk;
+
/*
* Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
* and IP vendor specific register IMX_TIMER1MS.
@@ -435,16 +457,24 @@ static int imx_ahci_probe(struct platform_device *pdev)
ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
0, 0, 0);
if (ret)
- imx_sata_disable(hpriv);
+ goto disable_sata;
+ return 0;
+
+disable_sata:
+ imx_sata_disable(hpriv);
+disable_clk:
+ clk_disable_unprepare(imxpriv->sata_clk);
return ret;
}
static void ahci_imx_host_stop(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
+ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
imx_sata_disable(hpriv);
+ clk_disable_unprepare(imxpriv->sata_clk);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index ebe505c17763..b10d81ddb528 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -58,7 +58,7 @@ static int ahci_probe(struct platform_device *pdev)
}
if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
- hflags |= AHCI_HFLAG_NO_FBS;
+ hflags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
hflags, 0, 0);
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 042a9bb45c86..ee3a3659bd9e 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -78,6 +78,7 @@
struct xgene_ahci_context {
struct ahci_host_priv *hpriv;
struct device *dev;
+	u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued */
void __iomem *csr_core; /* Core CSR address of IP */
void __iomem *csr_diag; /* Diag CSR address of IP */
void __iomem *csr_axi; /* AXI CSR address of IP */
@@ -98,20 +99,62 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
}
/**
+ * xgene_ahci_restart_engine - Restart the dma engine.
+ * @ap : ATA port of interest
+ *
+ * Restarts the dma engine inside the controller.
+ */
+static int xgene_ahci_restart_engine(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ ahci_stop_engine(ap);
+ ahci_start_fis_rx(ap);
+ hpriv->start_engine(ap);
+
+ return 0;
+}
+
+/**
+ * xgene_ahci_qc_issue - Issue commands to the device
+ * @qc: Command to issue
+ *
+ * Due to a hardware erratum with the IDENTIFY DEVICE command, the controller
+ * cannot clear the BSY bit after receiving the PIO setup FIS, which drives
+ * the DMA state machine into the CMFatalErrorUpdate state and locks it up.
+ * Restarting the DMA engine brings the controller out of the locked-up state.
+ */
+static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct xgene_ahci_context *ctx = hpriv->plat_data;
+ int rc = 0;
+
+ if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
+ xgene_ahci_restart_engine(ap);
+
+ rc = ahci_qc_issue(qc);
+
+ /* Save the last command issued */
+ ctx->last_cmd[ap->port_no] = qc->tf.command;
+
+ return rc;
+}
+
+/**
* xgene_ahci_read_id - Read ID data from the specified device
* @dev: device
* @tf: proposed taskfile
* @id: data buffer
*
 * This custom read ID function is required because the HW
- * does not support DEVSLP and the controller state machine may get stuck
- * after processing the ID query command.
+ * does not support DEVSLP.
*/
static unsigned int xgene_ahci_read_id(struct ata_device *dev,
struct ata_taskfile *tf, u16 *id)
{
u32 err_mask;
- void __iomem *port_mmio = ahci_port_base(dev->link->ap);
err_mask = ata_do_dev_read_id(dev, tf, id);
if (err_mask)
@@ -133,16 +176,6 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev,
*/
id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
- /*
- * Due to HW errata, restart the port if no other command active.
- * Otherwise the controller may get stuck.
- */
- if (!readl(port_mmio + PORT_CMD_ISSUE)) {
- writel(PORT_CMD_FIS_RX, port_mmio + PORT_CMD);
- readl(port_mmio + PORT_CMD); /* Force a barrier */
- writel(PORT_CMD_FIS_RX | PORT_CMD_START, port_mmio + PORT_CMD);
- readl(port_mmio + PORT_CMD); /* Force a barrier */
- }
return 0;
}
@@ -300,6 +333,7 @@ static struct ata_port_operations xgene_ahci_ops = {
.host_stop = xgene_ahci_host_stop,
.hardreset = xgene_ahci_hardreset,
.read_id = xgene_ahci_read_id,
+ .qc_issue = xgene_ahci_qc_issue,
};
static const struct ata_port_info xgene_ahci_port_info = {
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 40ea583d3610..d72ce0470309 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -68,7 +68,6 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
@@ -620,7 +619,7 @@ int ahci_stop_engine(struct ata_port *ap)
}
EXPORT_SYMBOL_GPL(ahci_stop_engine);
-static void ahci_start_fis_rx(struct ata_port *ap)
+void ahci_start_fis_rx(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -646,6 +645,7 @@ static void ahci_start_fis_rx(struct ata_port *ap)
/* flush */
readl(port_mmio + PORT_CMD);
}
+EXPORT_SYMBOL_GPL(ahci_start_fis_rx);
static int ahci_stop_fis_rx(struct ata_port *ap)
{
@@ -1945,7 +1945,7 @@ irqreturn_t ahci_interrupt(int irq, void *dev_instance)
}
EXPORT_SYMBOL_GPL(ahci_interrupt);
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
+unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = ahci_port_base(ap);
@@ -1974,6 +1974,7 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
return 0;
}
+EXPORT_SYMBOL_GPL(ahci_qc_issue);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 3a5b4ed25a4f..b0077589f065 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -250,8 +250,13 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
if (IS_ERR(hpriv->phy)) {
rc = PTR_ERR(hpriv->phy);
switch (rc) {
- case -ENODEV:
case -ENOSYS:
+ /* No PHY support. Check if PHY is required. */
+ if (of_find_property(dev->of_node, "phys", NULL)) {
+ dev_err(dev, "couldn't get sata-phy: ENOSYS\n");
+ goto err_out;
+ }
+ case -ENODEV:
/* continue normally */
hpriv->phy = NULL;
break;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 18d97d5c7d90..677c0c1b03bd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
* ata_qc_new - Request an available ATA command, for queueing
* @ap: target port
*
+ * Some ATA host controllers implement a queue depth smaller than
+ * ATA_MAX_QUEUE, so never allocate a tag beyond that hardware limit.
+ *
* LOCKING:
* None.
*/
@@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
struct ata_queued_cmd *qc = NULL;
+ unsigned int max_queue = ap->host->n_tags;
unsigned int i, tag;
/* no command while frozen */
if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
return NULL;
- for (i = 0; i < ATA_MAX_QUEUE; i++) {
- tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+ for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
+ tag = tag < max_queue ? tag : 0;
/* the last tag is reserved for internal command. */
if (tag == ATA_TAG_INTERNAL)
@@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
{
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
+ host->n_tags = ATA_MAX_QUEUE - 1;
host->dev = dev;
host->ops = ops;
}
@@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
int i, rc;
+ host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
+
/* host must have been started */
if (!(host->flags & ATA_HOST_STARTED)) {
dev_err(host->dev, "BUG: trying to register unstarted host\n");
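
ata_qc_new() now scans a circular window of n_tags slots starting just past the last tag handed out, so controllers with a shallow queue never receive an out-of-range tag. The scan, pulled into a hypothetical helper (the real loop also skips ATA_TAG_INTERNAL):

static int find_free_tag(unsigned long *map, unsigned int last_tag,
			 unsigned int max_queue)
{
	unsigned int i, tag;

	for (i = 0, tag = last_tag + 1; i < max_queue; i++, tag++) {
		if (tag >= max_queue)
			tag = 0;		/* wrap the window */
		if (!test_and_set_bit(tag, map))
			return tag;		/* claimed this slot */
	}
	return -1;				/* every tag is busy */
}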
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 6760fc4e85b8..dad83df555c4 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
case ATA_DEV_ATA:
if (err & ATA_ICRC)
qc->err_mask |= AC_ERR_ATA_BUS;
- if (err & ATA_UNC)
+ if (err & (ATA_UNC | ATA_AMNF))
qc->err_mask |= AC_ERR_MEDIA;
if (err & ATA_IDNF)
qc->err_mask |= AC_ERR_INVALID;
@@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link)
}
if (cmd->command != ATA_CMD_PACKET &&
- (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
- ATA_ABORTED)))
- ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
+ (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
+ ATA_IDNF | ATA_ABORTED)))
+ ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
res->feature & ATA_ICRC ? "ICRC " : "",
res->feature & ATA_UNC ? "UNC " : "",
+ res->feature & ATA_AMNF ? "AMNF " : "",
res->feature & ATA_IDNF ? "IDNF " : "",
res->feature & ATA_ABORTED ? "ABRT " : "");
#endif
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 6ad5c072ce34..4d37c5415fc7 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
struct ep93xx_pata_data *drv_data;
struct ata_host *host;
struct ata_port *ap;
- unsigned int irq;
+ int irq;
struct resource *mem_res;
void __iomem *ide_base;
int err;
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c4778995cd72..f748430bb654 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -18,6 +18,15 @@
#include <linux/mutex.h>
#include <linux/slab.h>
+struct component_match {
+ size_t alloc;
+ size_t num;
+ struct {
+ void *data;
+ int (*fn)(struct device *, void *);
+ } compare[0];
+};
+
struct master {
struct list_head node;
struct list_head components;
@@ -25,6 +34,7 @@ struct master {
const struct component_master_ops *ops;
struct device *dev;
+ struct component_match *match;
};
struct component {
@@ -69,6 +79,11 @@ static void component_detach_master(struct master *master, struct component *c)
c->master = NULL;
}
+/*
+ * Add a component to a master, finding the component via the compare
+ * function and compare data. This is safe to call for duplicate matches
+ * and will not result in the same component being added multiple times.
+ */
int component_master_add_child(struct master *master,
int (*compare)(struct device *, void *), void *compare_data)
{
@@ -76,11 +91,12 @@ int component_master_add_child(struct master *master,
int ret = -ENXIO;
list_for_each_entry(c, &component_list, node) {
- if (c->master)
+ if (c->master && c->master != master)
continue;
if (compare(c->dev, compare_data)) {
- component_attach_master(master, c);
+ if (!c->master)
+ component_attach_master(master, c);
ret = 0;
break;
}
@@ -90,6 +106,34 @@ int component_master_add_child(struct master *master,
}
EXPORT_SYMBOL_GPL(component_master_add_child);
+static int find_components(struct master *master)
+{
+ struct component_match *match = master->match;
+ size_t i;
+ int ret = 0;
+
+ if (!match) {
+ /*
+ * Search the list of components, looking for components that
+ * belong to this master, and attach them to the master.
+ */
+ return master->ops->add_components(master->dev, master);
+ }
+
+ /*
+ * Scan the array of match functions and attach
+ * any components which are found to this master.
+ */
+ for (i = 0; i < match->num; i++) {
+ ret = component_master_add_child(master,
+ match->compare[i].fn,
+ match->compare[i].data);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
/* Detach all attached components from this master */
static void master_remove_components(struct master *master)
{
@@ -113,44 +157,44 @@ static void master_remove_components(struct master *master)
static int try_to_bring_up_master(struct master *master,
struct component *component)
{
- int ret = 0;
+ int ret;
- if (!master->bound) {
- /*
- * Search the list of components, looking for components that
- * belong to this master, and attach them to the master.
- */
- if (master->ops->add_components(master->dev, master)) {
- /* Failed to find all components */
- master_remove_components(master);
- ret = 0;
- goto out;
- }
+ if (master->bound)
+ return 0;
- if (component && component->master != master) {
- master_remove_components(master);
- ret = 0;
- goto out;
- }
+ /*
+ * Search the list of components, looking for components that
+ * belong to this master, and attach them to the master.
+ */
+ if (find_components(master)) {
+ /* Failed to find all components */
+ ret = 0;
+ goto out;
+ }
- if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
- ret = -ENOMEM;
- goto out;
- }
+ if (component && component->master != master) {
+ ret = 0;
+ goto out;
+ }
- /* Found all components */
- ret = master->ops->bind(master->dev);
- if (ret < 0) {
- devres_release_group(master->dev, NULL);
- dev_info(master->dev, "master bind failed: %d\n", ret);
- master_remove_components(master);
- goto out;
- }
+ if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
- master->bound = true;
- ret = 1;
+ /* Found all components */
+ ret = master->ops->bind(master->dev);
+ if (ret < 0) {
+ devres_release_group(master->dev, NULL);
+ dev_info(master->dev, "master bind failed: %d\n", ret);
+ goto out;
}
+
+ master->bound = true;
+ return 1;
+
out:
+ master_remove_components(master);
return ret;
}
@@ -180,18 +224,89 @@ static void take_down_master(struct master *master)
master_remove_components(master);
}
-int component_master_add(struct device *dev,
- const struct component_master_ops *ops)
+static size_t component_match_size(size_t num)
+{
+ return offsetof(struct component_match, compare[num]);
+}
+
+static struct component_match *component_match_realloc(struct device *dev,
+ struct component_match *match, size_t num)
+{
+ struct component_match *new;
+
+ if (match && match->alloc == num)
+ return match;
+
+ new = devm_kmalloc(dev, component_match_size(num), GFP_KERNEL);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+
+ if (match) {
+ memcpy(new, match, component_match_size(min(match->num, num)));
+ devm_kfree(dev, match);
+ } else {
+ new->num = 0;
+ }
+
+ new->alloc = num;
+
+ return new;
+}
+
+/*
+ * Add a component to be matched.
+ *
+ * The match array is first created or extended if necessary.
+ */
+void component_match_add(struct device *dev, struct component_match **matchptr,
+ int (*compare)(struct device *, void *), void *compare_data)
+{
+ struct component_match *match = *matchptr;
+
+ if (IS_ERR(match))
+ return;
+
+ if (!match || match->num == match->alloc) {
+ size_t new_size = match ? match->alloc + 16 : 15;
+
+ match = component_match_realloc(dev, match, new_size);
+
+ *matchptr = match;
+
+ if (IS_ERR(match))
+ return;
+ }
+
+ match->compare[match->num].fn = compare;
+ match->compare[match->num].data = compare_data;
+ match->num++;
+}
+EXPORT_SYMBOL(component_match_add);
+
+int component_master_add_with_match(struct device *dev,
+ const struct component_master_ops *ops,
+ struct component_match *match)
{
struct master *master;
int ret;
+ if (ops->add_components && match)
+ return -EINVAL;
+
+ if (match) {
+ /* Reallocate the match array for its true size */
+ match = component_match_realloc(dev, match, match->num);
+ if (IS_ERR(match))
+ return PTR_ERR(match);
+ }
+
master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
master->dev = dev;
master->ops = ops;
+ master->match = match;
INIT_LIST_HEAD(&master->components);
/* Add to the list of available masters. */
@@ -209,6 +324,13 @@ int component_master_add(struct device *dev,
return ret < 0 ? ret : 0;
}
+EXPORT_SYMBOL_GPL(component_master_add_with_match);
+
+int component_master_add(struct device *dev,
+ const struct component_master_ops *ops)
+{
+ return component_master_add_with_match(dev, ops, NULL);
+}
EXPORT_SYMBOL_GPL(component_master_add);
void component_master_del(struct device *dev,
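
From a master driver's probe, the new interface is used by accumulating a match array and handing it over; a hedged usage sketch, with compare_of() and my_master_ops as placeholders modeled on how DRM drivers consume this API:

static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;	/* match by DT node */
}

static int my_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device_node *child;

	for_each_available_child_of_node(pdev->dev.of_node, child)
		component_match_add(&pdev->dev, &match, compare_of, child);

	return component_master_add_with_match(&pdev->dev,
					       &my_master_ops, match);
}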
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9e9227e1762d..eee48c49f5de 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -89,8 +89,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
return dev->archdata.irqs[num];
#else
struct resource *r;
- if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
- return of_irq_get(dev->dev.of_node, num);
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+ int ret;
+
+ ret = of_irq_get(dev->dev.of_node, num);
+ if (ret >= 0 || ret == -EPROBE_DEFER)
+ return ret;
+ }
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
@@ -133,8 +138,13 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
struct resource *r;
- if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
- return of_irq_get_byname(dev->dev.of_node, name);
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+ int ret;
+
+ ret = of_irq_get_byname(dev->dev.of_node, name);
+ if (ret >= 0 || ret == -EPROBE_DEFER)
+ return ret;
+ }
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
return r ? r->start : -ENXIO;
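
With this change platform_get_irq() and platform_get_irq_byname() can legitimately return -EPROBE_DEFER (the interrupt controller is not ready yet) and fall back to the IORESOURCE_IRQ resource only for other of_irq_get() failures. Callers should simply propagate negative values:

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER: probe is retried later */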
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1b35c45c92b7..3f2e16738080 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
struct task_struct *opa;
kref_get(&connection->kref);
+ /* We may just have force_sig()'ed this thread
+ * to get it out of some blocking network function.
+ * Clear signals; otherwise kthread_run(), which internally uses
+ * wait_on_completion_killable(), will mistake our pending signal
+ * for a new fatal signal and fail. */
+ flush_signals(current);
opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
if (IS_ERR(opa)) {
drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 089e72cd37be..36e54be402df 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
- if (reset_capacity) {
+ if (reset_capacity)
set_capacity(zram->disk, 0);
- revalidate_disk(zram->disk);
- }
+
up_write(&zram->init_lock);
+
+ /*
+ * Revalidate the disk outside of init_lock to avoid a lockdep splat.
+ * It's okay because the disk's capacity is protected by init_lock,
+ * so revalidate_disk() always sees an up-to-date capacity.
+ */
+ if (reset_capacity)
+ revalidate_disk(zram->disk);
}
static ssize_t disksize_store(struct device *dev,
@@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev,
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- revalidate_disk(zram->disk);
up_write(&zram->init_lock);
+
+ /*
+ * Revalidate the disk outside of init_lock to avoid a lockdep splat.
+ * It's okay because the disk's capacity is protected by init_lock,
+ * so revalidate_disk() always sees an up-to-date capacity.
+ */
+ revalidate_disk(zram->disk);
+
return len;
out_destroy_comp:
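A hedged restatement of the reasoning, since the lockdep report itself is not quoted in this patch:

/*
 * revalidate_disk() can take other block-layer locks, so calling it
 * under init_lock risks the inverse lock ordering lockdep complained
 * about; the capacity value itself stays coherent because every
 * writer of it holds init_lock.
 */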
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index f98380648cb3..f50dffc0374f 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -90,7 +90,6 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0b05, 0x17d0) },
{ USB_DEVICE(0x0CF3, 0x0036) },
{ USB_DEVICE(0x0CF3, 0x3004) },
- { USB_DEVICE(0x0CF3, 0x3005) },
{ USB_DEVICE(0x0CF3, 0x3008) },
{ USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x0CF3, 0x311E) },
@@ -140,7 +139,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a1c80b0c7663..6250fc2fb93a 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -162,7 +162,6 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 04680ead9275..fede8ca7147c 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
BT_ERR("Non-link packet received in non-active state");
h5_reset_rx(h5);
+ return 0;
}
h5->rx_func = h5_rx_payload;
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index b29703324e94..09f17eb73486 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -710,19 +710,6 @@ static int agp_open(struct inode *inode, struct file *file)
return 0;
}
-
-static ssize_t agp_read(struct file *file, char __user *buf,
- size_t count, loff_t * ppos)
-{
- return -EINVAL;
-}
-
-static ssize_t agp_write(struct file *file, const char __user *buf,
- size_t count, loff_t * ppos)
-{
- return -EINVAL;
-}
-
static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_info userinfo;
@@ -1047,8 +1034,6 @@ static const struct file_operations agp_fops =
{
.owner = THIS_MODULE,
.llseek = no_llseek,
- .read = agp_read,
- .write = agp_write,
.unlocked_ioctl = agp_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_agp_ioctl,
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 334601cc81cf..c4419ea1ab07 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -55,16 +55,41 @@ static DEFINE_MUTEX(rng_mutex);
static int data_avail;
static u8 *rng_buffer;
+static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+ int wait);
+
static size_t rng_buffer_size(void)
{
return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}
+static void add_early_randomness(struct hwrng *rng)
+{
+ unsigned char bytes[16];
+ int bytes_read;
+
+ /*
+ * Currently only virtio-rng cannot return data during device
+ * probe, and that's handled in virtio-rng.c itself. If there
+ * are more such devices, this call to rng_get_data can be
+ * made conditional here instead of doing it per-device.
+ */
+ bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+ if (bytes_read > 0)
+ add_device_randomness(bytes, bytes_read);
+}
+
static inline int hwrng_init(struct hwrng *rng)
{
- if (!rng->init)
- return 0;
- return rng->init(rng);
+ if (rng->init) {
+ int ret;
+
+ ret = rng->init(rng);
+ if (ret)
+ return ret;
+ }
+ add_early_randomness(rng);
+ return 0;
}
static inline void hwrng_cleanup(struct hwrng *rng)
@@ -304,8 +329,6 @@ int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *old_rng, *tmp;
- unsigned char bytes[16];
- int bytes_read;
if (rng->name == NULL ||
(rng->data_read == NULL && rng->read == NULL))
@@ -347,9 +370,17 @@ int hwrng_register(struct hwrng *rng)
INIT_LIST_HEAD(&rng->list);
list_add_tail(&rng->list, &rng_list);
- bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
- if (bytes_read > 0)
- add_device_randomness(bytes, bytes_read);
+ if (old_rng && !rng->init) {
+ /*
+ * Use a new device's input to add some randomness to
+ * the system. If this rng device isn't going to be
+ * used right away, its init function hasn't been
+ * called yet; so only use the randomness from devices
+ * that don't need an init callback.
+ */
+ add_early_randomness(rng);
+ }
+
out_unlock:
mutex_unlock(&rng_mutex);
out:
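A minimal provider sketch showing the ordering the patch establishes: ->init (when present) always runs before add_early_randomness() touches the device. The demo_* names are illustrative:

#include <linux/hw_random.h>
#include <linux/string.h>

static int demo_init(struct hwrng *rng)
{
	return 0;	/* power up / configure the entropy source */
}

static int demo_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	memset(buf, 0, max);	/* placeholder; real hardware fills buf */
	return max;
}

static struct hwrng demo_rng = {
	.name = "demo",
	.init = demo_init,	/* early randomness now waits for this */
	.read = demo_read,
};
/* hwrng_register(&demo_rng) from module init; hwrng_unregister() on exit. */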
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index f3e71501de54..e9b15bc18b4d 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -38,6 +38,8 @@ struct virtrng_info {
int index;
};
+static bool probe_done;
+
static void random_recv_done(struct virtqueue *vq)
{
struct virtrng_info *vi = vq->vdev->priv;
@@ -67,6 +69,13 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
int ret;
struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
+ /*
+ * Don't ask the host for data until we're set up. This call can
+ * happen during hwrng_register(), after commit d9e7972619.
+ */
+ if (unlikely(!probe_done))
+ return 0;
+
if (!vi->busy) {
vi->busy = true;
init_completion(&vi->have_data);
@@ -137,6 +146,7 @@ static int probe_common(struct virtio_device *vdev)
return err;
}
+ probe_done = true;
return 0;
}
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index d915707d2ba1..93dcad0c1cbe 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -138,7 +138,9 @@ static int i8k_smm(struct smm_regs *regs)
if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
return -ENOMEM;
cpumask_copy(old_mask, &current->cpus_allowed);
- set_cpus_allowed_ptr(current, cpumask_of(0));
+ rc = set_cpus_allowed_ptr(current, cpumask_of(0));
+ if (rc)
+ goto out;
if (smp_processor_id() != 0) {
rc = -EBUSY;
goto out;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0a7ac0a7b252..71529e196b84 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -641,7 +641,7 @@ retry:
} while (unlikely(entropy_count < pool_size-2 && pnfrac));
}
- if (entropy_count < 0) {
+ if (unlikely(entropy_count < 0)) {
pr_warn("random: negative entropy/overflow: pool %s count %d\n",
r->name, entropy_count);
WARN_ON(1);
@@ -981,7 +981,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
int reserved)
{
int entropy_count, orig;
- size_t ibytes;
+ size_t ibytes, nfrac;
BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
@@ -999,7 +999,17 @@ retry:
}
if (ibytes < min)
ibytes = 0;
- if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0)
+
+ if (unlikely(entropy_count < 0)) {
+ pr_warn("random: negative entropy count: pool %s count %d\n",
+ r->name, entropy_count);
+ WARN_ON(1);
+ entropy_count = 0;
+ }
+ nfrac = ibytes << (ENTROPY_SHIFT + 3);
+ if ((size_t) entropy_count > nfrac)
+ entropy_count -= nfrac;
+ else
entropy_count = 0;
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
@@ -1376,6 +1386,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
"with %d bits of entropy available\n",
current->comm, nonblocking_pool.entropy_total);
+ nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
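The clamp above bounds the fractional-bit arithmetic; a hedged restatement:

/*
 * Entropy is accounted in fractional bits: nbytes << (ENTROPY_SHIFT + 3).
 * With ENTROPY_SHIFT == 3 the clamp caps reads at INT_MAX >> 6 bytes
 * (roughly 32 MiB), so the shifted value can never overflow a 32-bit int.
 */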
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 9b7b5859a420..3757e9e72d37 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -230,16 +230,13 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
goto err_reg;
}
- s2mps11_clk->lookup = devm_kzalloc(&pdev->dev,
- sizeof(struct clk_lookup), GFP_KERNEL);
+ s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk,
+ s2mps11_name(s2mps11_clk), NULL);
if (!s2mps11_clk->lookup) {
ret = -ENOMEM;
goto err_lup;
}
- s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk);
- s2mps11_clk->lookup->clk = s2mps11_clk->clk;
-
clkdev_add(s2mps11_clk->lookup);
}
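The helper used above, in isolation (sketch; myclk and the con_id are placeholders):

struct clk_lookup *cl;

cl = clkdev_alloc(myclk, "my_con_id", NULL);	/* NULL dev_fmt: match on con_id only */
if (!cl)
	return -ENOMEM;		/* allocation and field setup in one call */
clkdev_add(cl);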
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 12f3c0b64fcd..4c449b3170f6 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -1209,7 +1209,7 @@ static struct clk_branch rot_clk = {
static u8 mmcc_pxo_hdmi_map[] = {
[P_PXO] = 0,
- [P_HDMI_PLL] = 2,
+ [P_HDMI_PLL] = 3,
};
static const char *mmcc_pxo_hdmi[] = {
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 4f150c9dd38c..7f4a473a7ad7 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -925,21 +925,13 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
GATE(CLK_RTC, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15,
0, 0),
GATE(CLK_KEYIF, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0),
- GATE(CLK_SCLK_PWM_ISP, "sclk_pwm_isp", "div_pwm_isp",
- E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_SCLK_SPI0_ISP, "sclk_spi0_isp", "div_spi0_isp_pre",
- E4X12_SRC_MASK_ISP, 4, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_SCLK_SPI1_ISP, "sclk_spi1_isp", "div_spi1_isp_pre",
- E4X12_SRC_MASK_ISP, 8, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "div_uart_isp",
- E4X12_SRC_MASK_ISP, 12, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "sclk_pwm_isp",
+ GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "div_pwm_isp",
E4X12_GATE_IP_ISP, 0, 0, 0),
- GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "sclk_spi0_isp",
+ GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "div_spi0_isp_pre",
E4X12_GATE_IP_ISP, 1, 0, 0),
- GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "sclk_spi1_isp",
+ GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "div_spi1_isp_pre",
E4X12_GATE_IP_ISP, 2, 0, 0),
- GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "sclk_uart_isp",
+ GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "div_uart_isp",
E4X12_GATE_IP_ISP, 3, 0, 0),
GATE(CLK_WDT, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0),
GATE(CLK_PCM0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2,
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 1fad4c5e3f5d..184f64293b26 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -661,7 +661,7 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0),
GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0),
GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub",
- GATE_IP_DISP1, 2, 0, 0),
+ GATE_IP_DISP1, 9, 0, 0),
GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub",
GATE_IP_DISP1, 8, 0, 0),
GATE(CLK_SMMU_2D, "smmu_2d", "div_aclk200", GATE_IP_ACP, 7, 0, 0),
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 9d7d7eed03fd..a4e6cc782e5c 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -631,7 +631,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
SRC_TOP4, 16, 1),
MUX(0, "mout_user_aclk266", mout_user_aclk266_p, SRC_TOP4, 20, 1),
MUX(0, "mout_user_aclk166", mout_user_aclk166_p, SRC_TOP4, 24, 1),
- MUX(0, "mout_user_aclk333", mout_user_aclk333_p, SRC_TOP4, 28, 1),
+ MUX(CLK_MOUT_USER_ACLK333, "mout_user_aclk333", mout_user_aclk333_p,
+ SRC_TOP4, 28, 1),
MUX(0, "mout_user_aclk400_disp1", mout_user_aclk400_disp1_p,
SRC_TOP5, 0, 1),
@@ -684,7 +685,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
SRC_TOP11, 12, 1),
MUX(0, "mout_sw_aclk266", mout_sw_aclk266_p, SRC_TOP11, 20, 1),
MUX(0, "mout_sw_aclk166", mout_sw_aclk166_p, SRC_TOP11, 24, 1),
- MUX(0, "mout_sw_aclk333", mout_sw_aclk333_p, SRC_TOP11, 28, 1),
+ MUX(CLK_MOUT_SW_ACLK333, "mout_sw_aclk333", mout_sw_aclk333_p,
+ SRC_TOP11, 28, 1),
MUX(0, "mout_sw_aclk400_disp1", mout_sw_aclk400_disp1_p,
SRC_TOP12, 4, 1),
@@ -890,8 +892,6 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen",
GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0),
- GATE(CLK_ACLK66_PERIC, "aclk66_peric", "mout_user_aclk66_peric",
- GATE_BUS_TOP, 11, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk266_isp", "mout_user_aclk266_isp",
GATE_BUS_TOP, 13, 0, 0),
GATE(0, "aclk166", "mout_user_aclk166",
@@ -994,34 +994,61 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
/* PERIC Block */
- GATE(CLK_UART0, "uart0", "aclk66_peric", GATE_IP_PERIC, 0, 0, 0),
- GATE(CLK_UART1, "uart1", "aclk66_peric", GATE_IP_PERIC, 1, 0, 0),
- GATE(CLK_UART2, "uart2", "aclk66_peric", GATE_IP_PERIC, 2, 0, 0),
- GATE(CLK_UART3, "uart3", "aclk66_peric", GATE_IP_PERIC, 3, 0, 0),
- GATE(CLK_I2C0, "i2c0", "aclk66_peric", GATE_IP_PERIC, 6, 0, 0),
- GATE(CLK_I2C1, "i2c1", "aclk66_peric", GATE_IP_PERIC, 7, 0, 0),
- GATE(CLK_I2C2, "i2c2", "aclk66_peric", GATE_IP_PERIC, 8, 0, 0),
- GATE(CLK_I2C3, "i2c3", "aclk66_peric", GATE_IP_PERIC, 9, 0, 0),
- GATE(CLK_USI0, "usi0", "aclk66_peric", GATE_IP_PERIC, 10, 0, 0),
- GATE(CLK_USI1, "usi1", "aclk66_peric", GATE_IP_PERIC, 11, 0, 0),
- GATE(CLK_USI2, "usi2", "aclk66_peric", GATE_IP_PERIC, 12, 0, 0),
- GATE(CLK_USI3, "usi3", "aclk66_peric", GATE_IP_PERIC, 13, 0, 0),
- GATE(CLK_I2C_HDMI, "i2c_hdmi", "aclk66_peric", GATE_IP_PERIC, 14, 0, 0),
- GATE(CLK_TSADC, "tsadc", "aclk66_peric", GATE_IP_PERIC, 15, 0, 0),
- GATE(CLK_SPI0, "spi0", "aclk66_peric", GATE_IP_PERIC, 16, 0, 0),
- GATE(CLK_SPI1, "spi1", "aclk66_peric", GATE_IP_PERIC, 17, 0, 0),
- GATE(CLK_SPI2, "spi2", "aclk66_peric", GATE_IP_PERIC, 18, 0, 0),
- GATE(CLK_I2S1, "i2s1", "aclk66_peric", GATE_IP_PERIC, 20, 0, 0),
- GATE(CLK_I2S2, "i2s2", "aclk66_peric", GATE_IP_PERIC, 21, 0, 0),
- GATE(CLK_PCM1, "pcm1", "aclk66_peric", GATE_IP_PERIC, 22, 0, 0),
- GATE(CLK_PCM2, "pcm2", "aclk66_peric", GATE_IP_PERIC, 23, 0, 0),
- GATE(CLK_PWM, "pwm", "aclk66_peric", GATE_IP_PERIC, 24, 0, 0),
- GATE(CLK_SPDIF, "spdif", "aclk66_peric", GATE_IP_PERIC, 26, 0, 0),
- GATE(CLK_USI4, "usi4", "aclk66_peric", GATE_IP_PERIC, 28, 0, 0),
- GATE(CLK_USI5, "usi5", "aclk66_peric", GATE_IP_PERIC, 30, 0, 0),
- GATE(CLK_USI6, "usi6", "aclk66_peric", GATE_IP_PERIC, 31, 0, 0),
-
- GATE(CLK_KEYIF, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0),
+ GATE(CLK_UART0, "uart0", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 0, 0, 0),
+ GATE(CLK_UART1, "uart1", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 1, 0, 0),
+ GATE(CLK_UART2, "uart2", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 2, 0, 0),
+ GATE(CLK_UART3, "uart3", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 3, 0, 0),
+ GATE(CLK_I2C0, "i2c0", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 6, 0, 0),
+ GATE(CLK_I2C1, "i2c1", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 7, 0, 0),
+ GATE(CLK_I2C2, "i2c2", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 8, 0, 0),
+ GATE(CLK_I2C3, "i2c3", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 9, 0, 0),
+ GATE(CLK_USI0, "usi0", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 10, 0, 0),
+ GATE(CLK_USI1, "usi1", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 11, 0, 0),
+ GATE(CLK_USI2, "usi2", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 12, 0, 0),
+ GATE(CLK_USI3, "usi3", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 13, 0, 0),
+ GATE(CLK_I2C_HDMI, "i2c_hdmi", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 14, 0, 0),
+ GATE(CLK_TSADC, "tsadc", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 15, 0, 0),
+ GATE(CLK_SPI0, "spi0", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 16, 0, 0),
+ GATE(CLK_SPI1, "spi1", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 17, 0, 0),
+ GATE(CLK_SPI2, "spi2", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 18, 0, 0),
+ GATE(CLK_I2S1, "i2s1", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 20, 0, 0),
+ GATE(CLK_I2S2, "i2s2", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 21, 0, 0),
+ GATE(CLK_PCM1, "pcm1", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 22, 0, 0),
+ GATE(CLK_PCM2, "pcm2", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 23, 0, 0),
+ GATE(CLK_PWM, "pwm", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 24, 0, 0),
+ GATE(CLK_SPDIF, "spdif", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 26, 0, 0),
+ GATE(CLK_USI4, "usi4", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 28, 0, 0),
+ GATE(CLK_USI5, "usi5", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 30, 0, 0),
+ GATE(CLK_USI6, "usi6", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 31, 0, 0),
+
+ GATE(CLK_KEYIF, "keyif", "mout_user_aclk66_peric",
+ GATE_BUS_PERIC, 22, 0, 0),
/* PERIS Block */
GATE(CLK_CHIPID, "chipid", "aclk66_psgen",
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index ba0716801db2..140f4733c02e 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -152,6 +152,11 @@ struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
ALIAS(HCLK, NULL, "hclk"),
ALIAS(MPLL, NULL, "mpll"),
ALIAS(FCLK, NULL, "fclk"),
+ ALIAS(PCLK, NULL, "watchdog"),
+ ALIAS(PCLK_SDI, NULL, "sdi"),
+ ALIAS(HCLK_NAND, NULL, "nand"),
+ ALIAS(PCLK_I2S, NULL, "iis"),
+ ALIAS(PCLK_I2C, NULL, "i2c"),
};
/* S3C2410 specific clocks */
@@ -378,7 +383,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
if (!np)
s3c2410_common_clk_register_fixed_ext(ctx, xti_f);
- if (current_soc == 2410) {
+ if (current_soc == S3C2410) {
if (_get_rate("xti") == 12 * MHZ) {
s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl;
s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl;
@@ -432,7 +437,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
samsung_clk_register_fixed_factor(ctx, s3c2410_ffactor,
ARRAY_SIZE(s3c2410_ffactor));
samsung_clk_register_alias(ctx, s3c2410_aliases,
- ARRAY_SIZE(s3c2410_common_aliases));
+ ARRAY_SIZE(s3c2410_aliases));
break;
case S3C2440:
samsung_clk_register_mux(ctx, s3c2440_muxes,
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index efa16ee592c8..8889ff1c10fc 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -418,8 +418,10 @@ static struct samsung_clock_alias s3c64xx_clock_aliases[] = {
ALIAS(SCLK_MMC2, "s3c-sdhci.2", "mmc_busclk.2"),
ALIAS(SCLK_MMC1, "s3c-sdhci.1", "mmc_busclk.2"),
ALIAS(SCLK_MMC0, "s3c-sdhci.0", "mmc_busclk.2"),
- ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi-bus"),
- ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi-bus"),
+ ALIAS(PCLK_SPI1, "s3c6410-spi.1", "spi_busclk0"),
+ ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi_busclk2"),
+ ALIAS(PCLK_SPI0, "s3c6410-spi.0", "spi_busclk0"),
+ ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi_busclk2"),
ALIAS(SCLK_AUDIO1, "samsung-pcm.1", "audio-bus"),
ALIAS(SCLK_AUDIO1, "samsung-i2s.1", "audio-bus"),
ALIAS(SCLK_AUDIO0, "samsung-pcm.0", "audio-bus"),
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index c2d204315546..bb5f387774e2 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { }
/* array of all spear 320 clock lookups */
#ifdef CONFIG_MACH_SPEAR320
-#define SPEAR320_CONTROL_REG (soc_config_base + 0x0000)
+#define SPEAR320_CONTROL_REG (soc_config_base + 0x0010)
#define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018)
#define SPEAR320_UARTX_PCLK_MASK 0x1
@@ -245,7 +245,8 @@ static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
"ras_syn0_gclk", };
static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
-static void __init spear320_clk_init(void __iomem *soc_config_base)
+static void __init spear320_clk_init(void __iomem *soc_config_base,
+ struct clk *ras_apb_clk)
{
struct clk *clk;
@@ -342,6 +343,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
SPEAR320_CONTROL_REG, UART1_PCLK_SHIFT, UART1_PCLK_MASK,
0, &_lock);
clk_register_clkdev(clk, NULL, "a3000000.serial");
+ /* Enforce ras_apb_clk as the parent */
+ clk_set_parent(clk, ras_apb_clk);
clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
ARRAY_SIZE(uartx_parents),
@@ -349,6 +352,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "a4000000.serial");
+ /* Enforce ras_apb_clk as the parent */
+ clk_set_parent(clk, ras_apb_clk);
clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
ARRAY_SIZE(uartx_parents),
@@ -379,12 +384,12 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
clk_register_clkdev(clk, NULL, "60100000.serial");
}
#else
-static inline void spear320_clk_init(void __iomem *soc_config_base) { }
+static inline void spear320_clk_init(void __iomem *sb, struct clk *rc) { }
#endif
void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
{
- struct clk *clk, *clk1;
+ struct clk *clk, *clk1, *ras_apb_clk;
clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
32000);
@@ -613,6 +618,7 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB,
RAS_APB_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, "ras_apb_clk", NULL);
+ ras_apb_clk = clk;
clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0,
RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock);
@@ -659,5 +665,5 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
else if (of_machine_is_compatible("st,spear310"))
spear310_clk_init();
else if (of_machine_is_compatible("st,spear320"))
- spear320_clk_init(soc_config_base);
+ spear320_clk_init(soc_config_base, ras_apb_clk);
}
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
index 44cd27c5c401..670f90d629d7 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -29,7 +29,7 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(&pdev->dev, r);
- if (!reg)
+ if (IS_ERR(reg))
return PTR_ERR(reg);
clk_parent = of_clk_get_parent_name(np, 0);
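The general rule the fix restores, as a sketch: devm_ioremap_resource() reports failure through ERR_PTR(), never NULL, so a NULL test silently accepts an error pointer:

void __iomem *base = devm_ioremap_resource(&pdev->dev, res);

if (IS_ERR(base))		/* not "if (!base)" */
	return PTR_ERR(base);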
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 5428c9c547cd..72d97279eae1 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -77,13 +77,11 @@ static int dra7_apll_enable(struct clk_hw *hw)
if (i == MAX_APLL_WAIT_TRIES) {
pr_warn("clock: %s failed transition to '%s'\n",
clk_name, (state) ? "locked" : "bypassed");
- } else {
+ r = -EBUSY;
+ } else
pr_debug("clock: %s transition to '%s' in %d loops\n",
clk_name, (state) ? "locked" : "bypassed", i);
- r = 0;
- }
-
return r;
}
@@ -338,7 +336,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
const char *parent_name;
u32 val;
- ad = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ ad = kzalloc(sizeof(*ad), GFP_KERNEL);
clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
init = kzalloc(sizeof(*init), GFP_KERNEL);
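The one-character bug above (sizing the ad allocation from *clk_hw) is the standard argument for the sizeof(*ptr) idiom; a sketch with an illustrative type:

struct foo *p;

p = kzalloc(sizeof(*p), GFP_KERNEL);	/* sized from the pointer itself */
/* copy-pasting this line for another type can no longer under-allocate */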
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index e1581335937d..cb8e6f14e880 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -16,7 +16,7 @@
#include <linux/clkdev.h>
#include <linux/clk/ti.h>
-#define DRA7_DPLL_ABE_DEFFREQ 361267200
+#define DRA7_DPLL_ABE_DEFFREQ 180633600
#define DRA7_DPLL_GMAC_DEFFREQ 1000000000
@@ -322,6 +322,11 @@ int __init dra7xx_dt_clk_init(void)
if (rc)
pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+ dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
+ rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
+ if (rc)
+ pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
+
dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
if (rc)
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index abd956d5f838..79791e1bf282 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -161,7 +161,8 @@ cleanup:
}
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
- defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX)
+ defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
+ defined(CONFIG_SOC_AM43XX)
/**
* ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
* @node: device node for this clock
@@ -322,7 +323,7 @@ CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
of_ti_omap4_dpll_x2_setup);
#endif
-#ifdef CONFIG_SOC_AM33XX
+#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
{
ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 0197a478720c..e9d650e51287 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -160,7 +160,7 @@ static void of_mux_clk_setup(struct device_node *node)
u8 clk_mux_flags = 0;
u32 mask = 0;
u32 shift = 0;
- u32 flags = 0;
+ u32 flags = CLK_SET_RATE_NO_REPARENT;
num_parents = of_clk_get_parent_count(node);
if (num_parents < 2) {
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index f71d55f5e6e5..ab51bf20a3ed 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -162,7 +162,7 @@ static void exynos4_mct_frc_start(void)
exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}
-static cycle_t exynos4_frc_read(struct clocksource *cs)
+static cycle_t notrace _exynos4_frc_read(void)
{
unsigned int lo, hi;
u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
@@ -176,6 +176,11 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)
return ((cycle_t)hi << 32) | lo;
}
+static cycle_t exynos4_frc_read(struct clocksource *cs)
+{
+ return _exynos4_frc_read();
+}
+
static void exynos4_frc_resume(struct clocksource *cs)
{
exynos4_mct_frc_start();
@@ -192,13 +197,24 @@ struct clocksource mct_frc = {
static u64 notrace exynos4_read_sched_clock(void)
{
- return exynos4_frc_read(&mct_frc);
+ return _exynos4_frc_read();
+}
+
+static struct delay_timer exynos4_delay_timer;
+
+static cycles_t exynos4_read_current_timer(void)
+{
+ return _exynos4_frc_read();
}
static void __init exynos4_clocksource_init(void)
{
exynos4_mct_frc_start();
+ exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
+ exynos4_delay_timer.freq = clk_rate;
+ register_current_timer_delay(&exynos4_delay_timer);
+
if (clocksource_register_hz(&mct_frc, clk_rate))
panic("%s: can't register clocksource\n", mct_frc.name);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index ebac67115009..7364a538e056 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -104,6 +104,7 @@ config ARM_IMX6Q_CPUFREQ
tristate "Freescale i.MX6 cpufreq support"
depends on ARCH_MXC
depends on REGULATOR_ANATOP
+ select PM_OPP
help
This adds cpufreq driver support for Freescale i.MX6 series SoCs.
@@ -118,7 +119,7 @@ config ARM_INTEGRATOR
If in doubt, say Y.
config ARM_KIRKWOOD_CPUFREQ
- def_bool MACH_KIRKWOOD
+ def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD
help
This adds the CPUFreq driver for Marvell Kirkwood
SoCs.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 738c8b7b17dc..db6d9a2fea4d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
# LITTLE drivers, so that it is probed last.
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
-obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o
+obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index ee1ae303a07c..86beda9f950b 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -152,11 +152,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
goto out_put_reg;
}
- ret = of_init_opp_table(cpu_dev);
- if (ret) {
- pr_err("failed to init OPP table: %d\n", ret);
- goto out_put_clk;
- }
+ /* OPPs might be populated at runtime; don't treat failure as fatal here */
+ of_init_opp_table(cpu_dev);
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 62259d27f03e..6f024852c6fb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1153,10 +1153,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
* the creation of a brand new one. So we need to perform this update
* by invoking update_policy_cpu().
*/
- if (recover_policy && cpu != policy->cpu)
+ if (recover_policy && cpu != policy->cpu) {
update_policy_cpu(policy, cpu);
- else
+ WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
+ } else {
policy->cpu = cpu;
+ }
cpumask_copy(policy->cpus, cpumask_of(cpu));
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 924bb2d42b1c..86631cb6f7de 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -128,6 +128,7 @@ static struct pstate_funcs pstate_funcs;
struct perf_limits {
int no_turbo;
+ int turbo_disabled;
int max_perf_pct;
int min_perf_pct;
int32_t max_perf;
@@ -287,7 +288,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
limits.no_turbo = clamp_t(int, input, 0 , 1);
-
+ if (limits.turbo_disabled) {
+ pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+ limits.no_turbo = limits.turbo_disabled;
+ }
return count;
}
@@ -357,21 +361,21 @@ static int byt_get_min_pstate(void)
{
u64 value;
rdmsrl(BYT_RATIOS, value);
- return (value >> 8) & 0x3F;
+ return (value >> 8) & 0x7F;
}
static int byt_get_max_pstate(void)
{
u64 value;
rdmsrl(BYT_RATIOS, value);
- return (value >> 16) & 0x3F;
+ return (value >> 16) & 0x7F;
}
static int byt_get_turbo_pstate(void)
{
u64 value;
rdmsrl(BYT_TURBO_RATIOS, value);
- return value & 0x3F;
+ return value & 0x7F;
}
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
@@ -381,7 +385,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
u32 vid;
val = pstate << 8;
- if (limits.no_turbo)
+ if (limits.no_turbo && !limits.turbo_disabled)
val |= (u64)1 << 32;
vid_fp = cpudata->vid.min + mul_fp(
@@ -405,8 +409,8 @@ static void byt_get_vid(struct cpudata *cpudata)
rdmsrl(BYT_VIDS, value);
- cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
- cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
+ cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
+ cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
cpudata->vid.ratio = div_fp(
cpudata->vid.max - cpudata->vid.min,
int_tofp(cpudata->pstate.max_pstate -
@@ -448,7 +452,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
u64 val;
val = pstate << 8;
- if (limits.no_turbo)
+ if (limits.no_turbo && !limits.turbo_disabled)
val |= (u64)1 << 32;
wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
@@ -696,9 +700,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
cpu = all_cpu_data[cpunum];
- intel_pstate_get_cpu_pstates(cpu);
-
cpu->cpu = cpunum;
+ intel_pstate_get_cpu_pstates(cpu);
init_timer_deferrable(&cpu->timer);
cpu->timer.function = intel_pstate_timer_func;
@@ -741,7 +744,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits.min_perf = int_tofp(1);
limits.max_perf_pct = 100;
limits.max_perf = int_tofp(1);
- limits.no_turbo = 0;
+ limits.no_turbo = limits.turbo_disabled;
return 0;
}
limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -784,6 +787,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
int rc;
+ u64 misc_en;
rc = intel_pstate_init_cpu(policy->cpu);
if (rc)
@@ -791,8 +795,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu];
- if (!limits.no_turbo &&
- limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
+ rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+ if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
+ cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
+ limits.turbo_disabled = 1;
+ limits.no_turbo = 1;
+ }
+ if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
else
policy->policy = CPUFREQ_POLICY_POWERSAVE;
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 546376719d8f..b5befc211172 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -349,7 +349,7 @@ static int __init sa1110_clk_init(void)
name = "K4S641632D";
if (machine_is_h3100())
name = "KM416S4030CT";
- if (machine_is_jornada720())
+ if (machine_is_jornada720() || machine_is_h3600())
name = "K4S281632B-1H";
if (machine_is_nanoengine())
name = "MT48LC8M16A2TG-75";
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 1d80bd3636c5..b512a4ba7569 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -453,8 +453,8 @@ static int caam_jr_probe(struct platform_device *pdev)
int error;
jrdev = &pdev->dev;
- jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
- GFP_KERNEL);
+ jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
+ GFP_KERNEL);
if (!jrpriv)
return -ENOMEM;
@@ -487,10 +487,8 @@ static int caam_jr_probe(struct platform_device *pdev)
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
- if (error) {
- kfree(jrpriv);
+ if (error)
return error;
- }
jrpriv->dev = jrdev;
spin_lock(&driver_data.jr_alloc_lock);
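Why the error path shrinks (sketch): devm allocations are owned by the device and released automatically on probe failure or unbind:

jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
if (!jrpriv)
	return -ENOMEM;
/* no matching kfree() needed on any later error path; devres frees it */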
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index d028f36ae655..8f8b0b608875 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -86,6 +86,9 @@
#define USBSS_IRQ_PD_COMP (1 << 2)
+/* Packet Descriptor */
+#define PD2_ZERO_LENGTH (1 << 19)
+
struct cppi41_channel {
struct dma_chan chan;
struct dma_async_tx_descriptor txd;
@@ -307,7 +310,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
__iormb();
while (val) {
- u32 desc;
+ u32 desc, len;
q_num = __fls(val);
val &= ~(1 << q_num);
@@ -319,9 +322,13 @@ static irqreturn_t cppi41_irq(int irq, void *data)
q_num, desc);
continue;
}
- c->residue = pd_trans_len(c->desc->pd6) -
- pd_trans_len(c->desc->pd0);
+ if (c->desc->pd2 & PD2_ZERO_LENGTH)
+ len = 0;
+ else
+ len = pd_trans_len(c->desc->pd0);
+
+ c->residue = pd_trans_len(c->desc->pd6) - len;
dma_cookie_complete(&c->txd);
c->txd.callback(c->txd.callback_param);
}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 128714622bf5..14867e3ac8ff 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -255,6 +255,7 @@ struct sdma_channel {
enum dma_slave_buswidth word_size;
unsigned int buf_tail;
unsigned int num_bd;
+ unsigned int period_len;
struct sdma_buffer_descriptor *bd;
dma_addr_t bd_phys;
unsigned int pc_from_device, pc_to_device;
@@ -593,6 +594,12 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
{
+ if (sdmac->desc.callback)
+ sdmac->desc.callback(sdmac->desc.callback_param);
+}
+
+static void sdma_update_channel_loop(struct sdma_channel *sdmac)
+{
struct sdma_buffer_descriptor *bd;
/*
@@ -611,9 +618,6 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
bd->mode.status |= BD_DONE;
sdmac->buf_tail++;
sdmac->buf_tail %= sdmac->num_bd;
-
- if (sdmac->desc.callback)
- sdmac->desc.callback(sdmac->desc.callback_param);
}
}
@@ -669,6 +673,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
int channel = fls(stat) - 1;
struct sdma_channel *sdmac = &sdma->channel[channel];
+ if (sdmac->flags & IMX_DMA_SG_LOOP)
+ sdma_update_channel_loop(sdmac);
+
tasklet_schedule(&sdmac->tasklet);
__clear_bit(channel, &stat);
@@ -1129,6 +1136,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
sdmac->status = DMA_IN_PROGRESS;
sdmac->buf_tail = 0;
+ sdmac->period_len = period_len;
sdmac->flags |= IMX_DMA_SG_LOOP;
sdmac->direction = direction;
@@ -1225,9 +1233,15 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
+ u32 residue;
+
+ if (sdmac->flags & IMX_DMA_SG_LOOP)
+ residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
+ else
+ residue = sdmac->chn_count - sdmac->chn_real_count;
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
- sdmac->chn_count - sdmac->chn_real_count);
+ residue);
return sdmac->status;
}
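A worked example of the new cyclic residue, with illustrative numbers:

/*
 * num_bd = 4 periods of period_len = 4096 bytes; after two periods have
 * completed, buf_tail = 2, so the reported residue is
 * (4 - 2) * 4096 = 8192 bytes still outstanding in the current cycle.
 */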
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 4199849e3758..145974f9662b 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,4 +1,5 @@
menu "IEEE 1394 (FireWire) support"
+ depends on HAS_DMA
depends on PCI || COMPILE_TEST
# firewire-core does not depend on PCI but is
# not useful without PCI controller driver
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 57985410f12f..a66a3217f1d9 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -336,10 +336,10 @@ static const struct {
QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
- QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
+ QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
- 0},
+ QUIRK_NO_MSI},
{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index eff1a2f22f09..dc79346689e6 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -346,6 +346,7 @@ static __initdata struct {
struct param_info {
int verbose;
+ int found;
void *params;
};
@@ -362,16 +363,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
(strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
return 0;
- pr_info("Getting parameters from FDT:\n");
-
for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len);
- if (!prop) {
- pr_err("Can't find %s in device tree!\n",
- dt_params[i].name);
+ if (!prop)
return 0;
- }
dest = info->params + dt_params[i].offset;
+ info->found++;
val = of_read_number(prop, len / sizeof(u32));
@@ -390,10 +387,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose)
{
struct param_info info;
+ int ret;
+
+ pr_info("Getting EFI parameters from FDT:\n");
info.verbose = verbose;
+ info.found = 0;
info.params = params;
- return of_scan_flat_dt(fdt_find_uefi_params, &info);
+ ret = of_scan_flat_dt(fdt_find_uefi_params, &info);
+ if (!info.found)
+ pr_info("UEFI not found.\n");
+ else if (!ret)
+ pr_err("Can't find '%s' in device tree!\n",
+ dt_params[info.found].name);
+
+ return ret;
}
#endif /* CONFIG_EFI_PARAMS_FROM_FDT */
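A note on the indexing in the error message above, hedged:

/*
 * info.found counts dt_params[] entries matched in order, so when the
 * scan stops early, dt_params[info.found] is exactly the first property
 * that was missing from the device tree.
 */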
diff --git a/drivers/firmware/efi/fdt.c b/drivers/firmware/efi/fdt.c
index 82d774161cc9..507a3df46a5d 100644
--- a/drivers/firmware/efi/fdt.c
+++ b/drivers/firmware/efi/fdt.c
@@ -23,16 +23,6 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
u32 fdt_val32;
u64 fdt_val64;
- /*
- * Copy definition of linux_banner here. Since this code is
- * built as part of the decompressor for ARM v7, pulling
- * in version.c where linux_banner is defined for the
- * kernel brings other kernel dependencies with it.
- */
- const char linux_banner[] =
- "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
- LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
-
/* Do some checks on the provided FDT, if it exists */
if (orig_fdt) {
if (fdt_check_header(orig_fdt)) {
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index fe7c0e211f9a..57adbc90fdad 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -900,8 +900,6 @@ static int mcp23s08_probe(struct spi_device *spi)
if (spi_present_mask & (1 << addr))
chips++;
}
- if (!chips)
- return -ENODEV;
} else {
type = spi_get_device_id(spi)->driver_data;
pdata = dev_get_platdata(&spi->dev);
@@ -940,10 +938,6 @@ static int mcp23s08_probe(struct spi_device *spi)
if (!(spi_present_mask & (1 << addr)))
continue;
chips--;
- if (chips < 0) {
- dev_err(&spi->dev, "FATAL: invalid negative chip id\n");
- goto fail;
- }
data->mcp[addr] = &data->chip[chips];
status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
0x40 | (addr << 1), type, base,
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 0c9f803fc1ac..b6ae89ea8811 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
.map = gpio_rcar_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
};
struct gpio_rcar_info {
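The effect of the added .xlate callback, restated briefly:

/*
 * irq_domain_xlate_twocell decodes two-cell DT interrupt specifiers
 * (hwirq index plus trigger flags), so DT consumers can now reference
 * this GPIO controller as an interrupt parent.
 */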
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f5120046ff80..b066bb3ca01a 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -114,6 +114,7 @@ config DRM_RADEON
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
+ select INTERVAL_TREE
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@@ -201,3 +202,5 @@ source "drivers/gpu/drm/msm/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"
source "drivers/gpu/drm/panel/Kconfig"
+
+source "drivers/gpu/drm/sti/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index dd2ba4269740..4a55d59ccd22 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -6,8 +6,8 @@ ccflags-y := -Iinclude/drm
drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
- drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_lock.o drm_memory.o drm_stub.o drm_vm.o \
+ drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
+ drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
drm_agpsupport.o drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
@@ -20,11 +20,12 @@ drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_PCI) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
+drm-$(CONFIG_OF) += drm_of.o
drm-usb-y := drm_usb.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
- drm_plane_helper.o
+ drm_plane_helper.o drm_dp_mst_topology.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_DRM_QXL) += qxl/
obj-$(CONFIG_DRM_BOCHS) += bochs/
obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
+obj-$(CONFIG_DRM_STI) += sti/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
index 59948eff6095..ad3d2ebf95c9 100644
--- a/drivers/gpu/drm/armada/armada_510.c
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -15,20 +15,19 @@
#include "armada_drm.h"
#include "armada_hw.h"
-static int armada510_init(struct armada_private *priv, struct device *dev)
+static int armada510_crtc_init(struct armada_crtc *dcrtc, struct device *dev)
{
- priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
+ struct clk *clk;
- if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
- priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
+ clk = devm_clk_get(dev, "ext_ref_clk1");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk) == -ENOENT ? -EPROBE_DEFER : PTR_ERR(clk);
- return PTR_RET(priv->extclk[0]);
-}
+ dcrtc->extclk[0] = clk;
-static int armada510_crtc_init(struct armada_crtc *dcrtc)
-{
/* Lower the watermark so as to eliminate jitter at higher bandwidths */
armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+
return 0;
}
@@ -45,8 +44,7 @@ static int armada510_crtc_init(struct armada_crtc *dcrtc)
static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
const struct drm_display_mode *mode, uint32_t *sclk)
{
- struct armada_private *priv = dcrtc->crtc.dev->dev_private;
- struct clk *clk = priv->extclk[0];
+ struct clk *clk = dcrtc->extclk[0];
int ret;
if (dcrtc->num == 1)
@@ -81,7 +79,6 @@ static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
const struct armada_variant armada510_ops = {
.has_spu_adv_reg = true,
.spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
- .init = armada510_init,
- .crtc_init = armada510_crtc_init,
- .crtc_compute_clock = armada510_crtc_compute_clock,
+ .init = armada510_crtc_init,
+ .compute_clock = armada510_crtc_compute_clock,
};
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 81c34f949dfc..3f620e21e06b 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -7,6 +7,9 @@
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "armada_crtc.h"
@@ -332,24 +335,23 @@ static void armada_drm_crtc_commit(struct drm_crtc *crtc)
static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode, struct drm_display_mode *adj)
{
- struct armada_private *priv = crtc->dev->dev_private;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
int ret;
/* We can't do interlaced modes if we don't have the SPU_ADV_REG */
- if (!priv->variant->has_spu_adv_reg &&
+ if (!dcrtc->variant->has_spu_adv_reg &&
adj->flags & DRM_MODE_FLAG_INTERLACE)
return false;
/* Check whether the display mode is possible */
- ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
+ ret = dcrtc->variant->compute_clock(dcrtc, adj, NULL);
if (ret)
return false;
return true;
}
-void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
+static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
{
struct armada_vbl_event *e, *n;
void __iomem *base = dcrtc->base;
@@ -410,6 +412,27 @@ void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
}
}
+static irqreturn_t armada_drm_irq(int irq, void *arg)
+{
+ struct armada_crtc *dcrtc = arg;
+ u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ /*
+ * This is ridiculous - rather than writing bits to clear, we
+ * have to set the actual status register value. This is racy.
+ */
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ /* Mask out those interrupts we haven't enabled */
+ v = stat & dcrtc->irq_ena;
+
+ if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
+ armada_drm_crtc_irq(dcrtc, stat);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
/* These are locked by dev->vbl_lock */
void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
{
@@ -470,7 +493,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode, struct drm_display_mode *adj,
int x, int y, struct drm_framebuffer *old_fb)
{
- struct armada_private *priv = crtc->dev->dev_private;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct armada_regs regs[17];
uint32_t lm, rm, tm, bm, val, sclk;
@@ -515,7 +537,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
}
/* Now compute the divider for real */
- priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
+ dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
/* Ensure graphic fifo is enabled */
armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
@@ -537,7 +559,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
dcrtc->v[1].spu_v_porch = tm << 16 | bm;
val = adj->crtc_hsync_start;
dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- priv->variant->spu_adv_reg;
+ dcrtc->variant->spu_adv_reg;
if (interlaced) {
/* Odd interlaced frame */
@@ -546,7 +568,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- priv->variant->spu_adv_reg;
+ dcrtc->variant->spu_adv_reg;
} else {
dcrtc->v[0] = dcrtc->v[1];
}
@@ -561,7 +583,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
LCD_SPUT_V_H_TOTAL);
- if (priv->variant->has_spu_adv_reg) {
+ if (dcrtc->variant->has_spu_adv_reg) {
armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
@@ -805,12 +827,11 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_private *priv = crtc->dev->dev_private;
struct armada_gem_object *obj = NULL;
int ret;
/* If no cursor support, replicate drm's return value */
- if (!priv->variant->has_spu_adv_reg)
+ if (!dcrtc->variant->has_spu_adv_reg)
return -ENXIO;
if (handle && w > 0 && h > 0) {
@@ -858,11 +879,10 @@ static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct drm_device *dev = crtc->dev;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_private *priv = crtc->dev->dev_private;
int ret;
/* If no cursor support, replicate drm's return value */
- if (!priv->variant->has_spu_adv_reg)
+ if (!dcrtc->variant->has_spu_adv_reg)
return -EFAULT;
mutex_lock(&dev->struct_mutex);
@@ -888,6 +908,10 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
if (!IS_ERR(dcrtc->clk))
clk_disable_unprepare(dcrtc->clk);
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA);
+
+ of_node_put(dcrtc->crtc.port);
+
kfree(dcrtc);
}
@@ -1027,19 +1051,20 @@ static int armada_drm_crtc_create_properties(struct drm_device *dev)
return 0;
}
-int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
- struct resource *res)
+int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
+ struct resource *res, int irq, const struct armada_variant *variant,
+ struct device_node *port)
{
- struct armada_private *priv = dev->dev_private;
+ struct armada_private *priv = drm->dev_private;
struct armada_crtc *dcrtc;
void __iomem *base;
int ret;
- ret = armada_drm_crtc_create_properties(dev);
+ ret = armada_drm_crtc_create_properties(drm);
if (ret)
return ret;
- base = devm_request_and_ioremap(dev->dev, res);
+ base = devm_request_and_ioremap(dev, res);
if (!base) {
DRM_ERROR("failed to ioremap register\n");
return -ENOMEM;
@@ -1051,8 +1076,12 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
return -ENOMEM;
}
+ if (dev != drm->dev)
+ dev_set_drvdata(dev, dcrtc);
+
+ dcrtc->variant = variant;
dcrtc->base = base;
- dcrtc->num = num;
+ dcrtc->num = drm->mode_config.num_crtc;
dcrtc->clk = ERR_PTR(-EINVAL);
dcrtc->csc_yuv_mode = CSC_AUTO;
dcrtc->csc_rgb_mode = CSC_AUTO;
@@ -1074,9 +1103,18 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+ writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
+ dcrtc);
+ if (ret < 0) {
+ kfree(dcrtc);
+ return ret;
+ }
- if (priv->variant->crtc_init) {
- ret = priv->variant->crtc_init(dcrtc);
+ if (dcrtc->variant->init) {
+ ret = dcrtc->variant->init(dcrtc, dev);
if (ret) {
kfree(dcrtc);
return ret;
@@ -1088,7 +1126,8 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
priv->dcrtc[dcrtc->num] = dcrtc;
- drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
+ dcrtc->crtc.port = port;
+ drm_crtc_init(drm, &dcrtc->crtc, &armada_crtc_funcs);
drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
@@ -1096,5 +1135,107 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
dcrtc->csc_rgb_mode);
- return armada_overlay_plane_create(dev, 1 << dcrtc->num);
+ return armada_overlay_plane_create(drm, 1 << dcrtc->num);
+}
+
+static int
+armada_lcd_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0);
+ const struct armada_variant *variant;
+ struct device_node *port = NULL;
+
+ if (irq < 0)
+ return irq;
+
+ if (!dev->of_node) {
+ const struct platform_device_id *id;
+
+ id = platform_get_device_id(pdev);
+ if (!id)
+ return -ENXIO;
+
+ variant = (const struct armada_variant *)id->driver_data;
+ } else {
+ const struct of_device_id *match;
+ struct device_node *np, *parent = dev->of_node;
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!match)
+ return -ENXIO;
+
+ np = of_get_child_by_name(parent, "ports");
+ if (np)
+ parent = np;
+ port = of_get_child_by_name(parent, "port");
+ of_node_put(np);
+ if (!port) {
+ dev_err(dev, "no port node found in %s\n",
+ parent->full_name);
+ return -ENXIO;
+ }
+
+ variant = match->data;
+ }
+
+ return armada_drm_crtc_create(drm, dev, res, irq, variant, port);
+}
+
+static void
+armada_lcd_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct armada_crtc *dcrtc = dev_get_drvdata(dev);
+
+ armada_drm_crtc_destroy(&dcrtc->crtc);
}
+
+static const struct component_ops armada_lcd_ops = {
+ .bind = armada_lcd_bind,
+ .unbind = armada_lcd_unbind,
+};
+
+static int armada_lcd_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &armada_lcd_ops);
+}
+
+static int armada_lcd_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &armada_lcd_ops);
+ return 0;
+}
+
+static struct of_device_id armada_lcd_of_match[] = {
+ {
+ .compatible = "marvell,dove-lcd",
+ .data = &armada510_ops,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, armada_lcd_of_match);
+
+static const struct platform_device_id armada_lcd_platform_ids[] = {
+ {
+ .name = "armada-lcd",
+ .driver_data = (unsigned long)&armada510_ops,
+ }, {
+ .name = "armada-510-lcd",
+ .driver_data = (unsigned long)&armada510_ops,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids);
+
+struct platform_driver armada_lcd_platform_driver = {
+ .probe = armada_lcd_probe,
+ .remove = armada_lcd_remove,
+ .driver = {
+ .name = "armada-lcd",
+ .owner = THIS_MODULE,
+ .of_match_table = armada_lcd_of_match,
+ },
+ .id_table = armada_lcd_platform_ids,
+};
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 9c10a07e7492..98102a5a9af5 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -32,12 +32,15 @@ struct armada_regs {
armada_reg_queue_mod(_r, _i, 0, 0, ~0)
struct armada_frame_work;
+struct armada_variant;
struct armada_crtc {
struct drm_crtc crtc;
+ const struct armada_variant *variant;
unsigned num;
void __iomem *base;
struct clk *clk;
+ struct clk *extclk[2];
struct {
uint32_t spu_v_h_total;
uint32_t spu_v_porch;
@@ -72,12 +75,16 @@ struct armada_crtc {
};
#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
-int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
+struct device_node;
+int armada_drm_crtc_create(struct drm_device *, struct device *,
+ struct resource *, int, const struct armada_variant *,
+ struct device_node *);
void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
-void armada_drm_crtc_irq(struct armada_crtc *, u32);
void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
+extern struct platform_driver armada_lcd_platform_driver;
+
#endif
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index a72cae03b99b..ea63c6c7c66f 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -59,26 +59,23 @@ void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
struct armada_private;
struct armada_variant {
- bool has_spu_adv_reg;
+ bool has_spu_adv_reg;
uint32_t spu_adv_reg;
- int (*init)(struct armada_private *, struct device *);
- int (*crtc_init)(struct armada_crtc *);
- int (*crtc_compute_clock)(struct armada_crtc *,
- const struct drm_display_mode *,
- uint32_t *);
+ int (*init)(struct armada_crtc *, struct device *);
+ int (*compute_clock)(struct armada_crtc *,
+ const struct drm_display_mode *,
+ uint32_t *);
};
/* Variant ops */
extern const struct armada_variant armada510_ops;
struct armada_private {
- const struct armada_variant *variant;
struct work_struct fb_unref_work;
DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
struct drm_fb_helper *fbdev;
struct armada_crtc *dcrtc[2];
struct drm_mm linear;
- struct clk *extclk[2];
struct drm_property *csc_yuv_prop;
struct drm_property *csc_rgb_prop;
struct drm_property *colorkey_prop;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 8ab3cd1a8cdb..e2d5792b140f 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -6,7 +6,9 @@
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/module.h>
+#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "armada_crtc.h"
@@ -52,6 +54,11 @@ static const struct armada_drm_slave_config tda19988_config = {
};
#endif
+static bool is_componentized(struct device *dev)
+{
+ return dev->of_node || dev->platform_data;
+}
+
static void armada_drm_unref_work(struct work_struct *work)
{
struct armada_private *priv =
@@ -85,6 +92,7 @@ void armada_drm_queue_unref_work(struct drm_device *dev,
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
const struct platform_device_id *id;
+ const struct armada_variant *variant;
struct armada_private *priv;
struct resource *res[ARRAY_SIZE(priv->dcrtc)];
struct resource *mem = NULL;
@@ -107,7 +115,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
return -EINVAL;
}
- if (!res[0] || !mem)
+ if (!mem)
return -ENXIO;
if (!devm_request_mem_region(dev->dev, mem->start,
@@ -128,11 +136,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
if (!id)
return -ENXIO;
- priv->variant = (struct armada_variant *)id->driver_data;
-
- ret = priv->variant->init(priv, dev->dev);
- if (ret)
- return ret;
+ variant = (const struct armada_variant *)id->driver_data;
INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
INIT_KFIFO(priv->fb_unref);
@@ -155,40 +159,50 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
/* Create all LCD controllers */
for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+ int irq;
+
if (!res[n])
break;
- ret = armada_drm_crtc_create(dev, n, res[n]);
+ irq = platform_get_irq(dev->platformdev, n);
+ if (irq < 0)
+ goto err_kms;
+
+ ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq,
+ variant, NULL);
if (ret)
goto err_kms;
}
+ if (is_componentized(dev->dev)) {
+ ret = component_bind_all(dev->dev, dev);
+ if (ret)
+ goto err_kms;
+ } else {
#ifdef CONFIG_DRM_ARMADA_TDA1998X
- ret = armada_drm_connector_slave_create(dev, &tda19988_config);
- if (ret)
- goto err_kms;
+ ret = armada_drm_connector_slave_create(dev, &tda19988_config);
+ if (ret)
+ goto err_kms;
#endif
+ }
- ret = drm_vblank_init(dev, n);
- if (ret)
- goto err_kms;
-
- ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret)
- goto err_kms;
+ goto err_comp;
dev->vblank_disable_allowed = 1;
ret = armada_fbdev_init(dev);
if (ret)
- goto err_irq;
+ goto err_comp;
drm_kms_helper_poll_init(dev);
return 0;
- err_irq:
- drm_irq_uninstall(dev);
+ err_comp:
+ if (is_componentized(dev->dev))
+ component_unbind_all(dev->dev, dev);
err_kms:
drm_mode_config_cleanup(dev);
drm_mm_takedown(&priv->linear);
@@ -203,7 +217,10 @@ static int armada_drm_unload(struct drm_device *dev)
drm_kms_helper_poll_fini(dev);
armada_fbdev_fini(dev);
- drm_irq_uninstall(dev);
+
+ if (is_componentized(dev->dev))
+ component_unbind_all(dev->dev, dev);
+
drm_mode_config_cleanup(dev);
drm_mm_takedown(&priv->linear);
flush_work(&priv->fb_unref_work);
@@ -259,52 +276,6 @@ static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
}
-static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
-{
- struct drm_device *dev = arg;
- struct armada_private *priv = dev->dev_private;
- struct armada_crtc *dcrtc = priv->dcrtc[0];
- uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
- irqreturn_t handled = IRQ_NONE;
-
- /*
- * This is rediculous - rather than writing bits to clear, we
- * have to set the actual status register value. This is racy.
- */
- writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
-
- /* Mask out those interrupts we haven't enabled */
- v = stat & dcrtc->irq_ena;
-
- if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
- armada_drm_crtc_irq(dcrtc, stat);
- handled = IRQ_HANDLED;
- }
-
- return handled;
-}
-
-static int armada_drm_irq_postinstall(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
- struct armada_crtc *dcrtc = priv->dcrtc[0];
-
- spin_lock_irq(&dev->vbl_lock);
- writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
- writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
- spin_unlock_irq(&dev->vbl_lock);
-
- return 0;
-}
-
-static void armada_drm_irq_uninstall(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
- struct armada_crtc *dcrtc = priv->dcrtc[0];
-
- writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
-}
-
static struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
DRM_UNLOCKED),
@@ -340,9 +311,6 @@ static struct drm_driver armada_drm_driver = {
.get_vblank_counter = drm_vblank_count,
.enable_vblank = armada_drm_enable_vblank,
.disable_vblank = armada_drm_disable_vblank,
- .irq_handler = armada_drm_irq_handler,
- .irq_postinstall = armada_drm_irq_postinstall,
- .irq_uninstall = armada_drm_irq_uninstall,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = armada_drm_debugfs_init,
.debugfs_cleanup = armada_drm_debugfs_cleanup,
@@ -362,19 +330,140 @@ static struct drm_driver armada_drm_driver = {
.desc = "Armada SoC DRM",
.date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_HAVE_IRQ | DRIVER_PRIME,
+ DRIVER_PRIME,
.ioctls = armada_ioctls,
.fops = &armada_drm_fops,
};
+static int armada_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&armada_drm_driver, to_platform_device(dev));
+}
+
+static void armada_drm_unbind(struct device *dev)
+{
+ drm_put_dev(dev_get_drvdata(dev));
+}
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int compare_dev_name(struct device *dev, void *data)
+{
+ const char *name = data;
+ return !strcmp(dev_name(dev), name);
+}
+
+static void armada_add_endpoints(struct device *dev,
+ struct component_match **match, struct device_node *port)
+{
+ struct device_node *ep, *remote;
+
+ for_each_child_of_node(port, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
+ continue;
+ } else if (!of_device_is_available(remote->parent)) {
+ dev_warn(dev, "parent device of %s is not available\n",
+ remote->full_name);
+ of_node_put(remote);
+ continue;
+ }
+
+ component_match_add(dev, match, compare_of, remote);
+ of_node_put(remote);
+ }
+}
+
+static int armada_drm_find_components(struct device *dev,
+ struct component_match **match)
+{
+ struct device_node *port;
+ int i;
+
+ if (dev->of_node) {
+ struct device_node *np = dev->of_node;
+
+ for (i = 0; ; i++) {
+ port = of_parse_phandle(np, "ports", i);
+ if (!port)
+ break;
+
+ component_match_add(dev, match, compare_of, port);
+ of_node_put(port);
+ }
+
+ if (i == 0) {
+ dev_err(dev, "missing 'ports' property\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; ; i++) {
+ port = of_parse_phandle(np, "ports", i);
+ if (!port)
+ break;
+
+ armada_add_endpoints(dev, match, port);
+ of_node_put(port);
+ }
+ } else if (dev->platform_data) {
+ char **devices = dev->platform_data;
+ struct device *d;
+
+ for (i = 0; devices[i]; i++)
+ component_match_add(dev, match, compare_dev_name,
+ devices[i]);
+
+ if (i == 0) {
+ dev_err(dev, "missing 'ports' property\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; devices[i]; i++) {
+ d = bus_find_device_by_name(&platform_bus_type, NULL,
+ devices[i]);
+ if (d && d->of_node) {
+ for_each_child_of_node(d->of_node, port)
+ armada_add_endpoints(dev, match, port);
+ }
+ put_device(d);
+ }
+ }
+
+ return 0;
+}
+
+static const struct component_master_ops armada_master_ops = {
+ .bind = armada_drm_bind,
+ .unbind = armada_drm_unbind,
+};
+
static int armada_drm_probe(struct platform_device *pdev)
{
- return drm_platform_init(&armada_drm_driver, pdev);
+ if (is_componentized(&pdev->dev)) {
+ struct component_match *match = NULL;
+ int ret;
+
+ ret = armada_drm_find_components(&pdev->dev, &match);
+ if (ret < 0)
+ return ret;
+
+ return component_master_add_with_match(&pdev->dev,
+ &armada_master_ops, match);
+ } else {
+ return drm_platform_init(&armada_drm_driver, pdev);
+ }
}
static int armada_drm_remove(struct platform_device *pdev)
{
- drm_put_dev(platform_get_drvdata(pdev));
+ if (is_componentized(&pdev->dev))
+ component_master_del(&pdev->dev, &armada_master_ops);
+ else
+ drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
@@ -402,14 +491,24 @@ static struct platform_driver armada_drm_platform_driver = {
static int __init armada_drm_init(void)
{
+ int ret;
+
armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls);
- return platform_driver_register(&armada_drm_platform_driver);
+
+ ret = platform_driver_register(&armada_lcd_platform_driver);
+ if (ret)
+ return ret;
+ ret = platform_driver_register(&armada_drm_platform_driver);
+ if (ret)
+ platform_driver_unregister(&armada_lcd_platform_driver);
+ return ret;
}
module_init(armada_drm_init);
static void __exit armada_drm_exit(void)
{
platform_driver_unregister(&armada_drm_platform_driver);
+ platform_driver_unregister(&armada_lcd_platform_driver);
}
module_exit(armada_drm_exit);
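
On the master side, armada_drm_probe() builds a component_match list (one entry per 'ports' phandle, plus the remote endpoints) and hands it to component_master_add_with_match(); the framework then calls armada_drm_bind() only once every listed component has registered. The skeleton of such a master, reduced to its essentials (hypothetical foo_* names, a sketch rather than the armada code):

#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;	/* match components by DT node */
}

static int foo_master_bind(struct device *dev)
{
	/* every matched component exists now; create the DRM device,
	 * then component_bind_all(dev, drm) to bind each slave */
	return 0;
}

static void foo_master_unbind(struct device *dev)
{
	/* component_unbind_all() and tear down the DRM device */
}

static const struct component_master_ops foo_master_ops = {
	.bind	= foo_master_bind,
	.unbind	= foo_master_unbind,
};

static int foo_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device_node *np;
	int i;

	/* one match entry per phandle in the 'ports' property */
	for (i = 0; (np = of_parse_phandle(pdev->dev.of_node, "ports", i)); i++) {
		component_match_add(&pdev->dev, &match, foo_compare_of, np);
		of_node_put(np);
	}

	return component_master_add_with_match(&pdev->dev,
					       &foo_master_ops, match);
}
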
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 5d6a87573c33..957d4fabf1e1 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -362,7 +362,7 @@ static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
{
int ret;
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 9896286ed262..5389350244f2 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -667,17 +667,9 @@ static void ast_encoder_destroy(struct drm_encoder *encoder)
static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 9c13df29fd20..f5e0ead974a6 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -97,6 +97,7 @@ static struct drm_driver bochs_driver = {
/* ---------------------------------------------------------------------- */
/* pm interface */
+#ifdef CONFIG_PM_SLEEP
static int bochs_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -131,6 +132,7 @@ static int bochs_pm_resume(struct device *dev)
drm_kms_helper_poll_enable(drm_dev);
return 0;
}
+#endif
static const struct dev_pm_ops bochs_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend,
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 19cf3e9413b6..fe95d31cd110 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -72,7 +72,7 @@ static int bochsfb_create(struct drm_fb_helper *helper,
bo = gem_to_bochs_bo(gobj);
- ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index dcf2e55f4ae9..9d7346b92653 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -53,7 +53,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) {
bochs_fb = to_bochs_framebuffer(old_fb);
bo = gem_to_bochs_bo(bochs_fb->obj);
- ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
if (ret) {
DRM_ERROR("failed to reserve old_fb bo\n");
} else {
@@ -67,7 +67,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
bo = gem_to_bochs_bo(bochs_fb->obj);
- ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
if (ret)
return ret;
@@ -216,18 +216,9 @@ static struct drm_encoder *
bochs_connector_best_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj = drm_mode_object_find(connector->dev, enc_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 08ce520f61a5..4516b052cc67 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -76,6 +76,7 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
+#ifdef CONFIG_PM_SLEEP
static int cirrus_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -110,6 +111,7 @@ static int cirrus_pm_resume(struct device *dev)
drm_kms_helper_poll_enable(drm_dev);
return 0;
}
+#endif
static const struct file_operations cirrus_driver_fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 117d3eca5e37..401c890b6c6a 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -241,7 +241,7 @@ static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
{
int ret;
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 49332c5fe35b..e1c5c3222129 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -509,19 +509,9 @@ static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
*connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj =
- drm_mode_object_find(connector->dev, enc_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
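
The ast, bochs and cirrus hunks above all collapse the same open-coded lookup into drm_encoder_find(). That helper is essentially the removed code factored into one place (paraphrased from include/drm/drm_crtc.h of this era):

static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
						   uint32_t id)
{
	struct drm_mode_object *mo;

	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
	return mo ? obj_to_encoder(mo) : NULL;
}
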
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 0406110f83ed..86a4a4a60afc 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -80,11 +80,7 @@ int drm_buffer_alloc(struct drm_buffer **buf, int size)
error_out:
- /* Only last element can be null pointer so check for it first. */
- if ((*buf)->data[idx])
- kfree((*buf)->data[idx]);
-
- for (--idx; idx >= 0; --idx)
+ for (; idx >= 0; --idx)
kfree((*buf)->data[idx]);
kfree(*buf);
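
The drm_buffer.c fix works because kfree(NULL) is defined to be a no-op: when the allocation of data[idx] fails, that slot is NULL, so the unwind loop can start at idx and walk down unconditionally. The general idiom, as a sketch:

int i;

for (i = 0; i < n; i++) {
	p[i] = kmalloc(size, GFP_KERNEL);
	if (!p[i])
		goto error_out;
}
return 0;

error_out:
/* p[i] is NULL here, but kfree(NULL) is a no-op, so no special case */
for (; i >= 0; --i)
	kfree(p[i]);
return -ENOMEM;
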
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 68175b54504b..61acb8f6756d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1217,7 +1217,6 @@ int drm_infobufs(struct drm_device *dev, void *data,
struct drm_buf_desc __user *to =
&request->list[count];
struct drm_buf_entry *from = &dma->bufs[i];
- struct drm_freelist *list = &dma->bufs[i].freelist;
if (copy_to_user(&to->count,
&from->buf_count,
sizeof(from->buf_count)) ||
@@ -1225,19 +1224,19 @@ int drm_infobufs(struct drm_device *dev, void *data,
&from->buf_size,
sizeof(from->buf_size)) ||
copy_to_user(&to->low_mark,
- &list->low_mark,
- sizeof(list->low_mark)) ||
+ &from->low_mark,
+ sizeof(from->low_mark)) ||
copy_to_user(&to->high_mark,
- &list->high_mark,
- sizeof(list->high_mark)))
+ &from->high_mark,
+ sizeof(from->high_mark)))
return -EFAULT;
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].buf_size,
- dma->bufs[i].freelist.low_mark,
- dma->bufs[i].freelist.high_mark);
+ dma->bufs[i].low_mark,
+ dma->bufs[i].high_mark);
++count;
}
}
@@ -1290,8 +1289,8 @@ int drm_markbufs(struct drm_device *dev, void *data,
if (request->high_mark < 0 || request->high_mark > entry->buf_count)
return -EINVAL;
- entry->freelist.low_mark = request->low_mark;
- entry->freelist.high_mark = request->high_mark;
+ entry->low_mark = request->low_mark;
+ entry->high_mark = request->high_mark;
return 0;
}
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index a4b017b6849e..9b23525c0ed0 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -1,18 +1,13 @@
-/**
- * \file drm_context.c
- * IOCTLs for generic contexts
- *
- * \author Rickard E. (Rik) Faith <[email protected]>
- * \author Gareth Hughes <[email protected]>
- */
-
/*
- * Created: Fri Nov 24 18:31:37 2000 by [email protected]
+ * Legacy: Generic DRM Contexts
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
+ * Author: Rickard E. (Rik) Faith <[email protected]>
+ * Author: Gareth Hughes <[email protected]>
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -33,14 +28,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/*
- * ChangeLog:
- * 2001-11-16 Torsten Duwe <[email protected]>
- * added context constructor/destructor hooks,
- * needed by SiS driver's memory management.
- */
-
#include <drm/drmP.h>
+#include "drm_legacy.h"
+
+struct drm_ctx_list {
+ struct list_head head;
+ drm_context_t handle;
+ struct drm_file *tag;
+};
/******************************************************************/
/** \name Context bitmap support */
@@ -56,7 +51,7 @@
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
-void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
mutex_lock(&dev->struct_mutex);
idr_remove(&dev->ctx_idr, ctx_handle);
@@ -72,7 +67,7 @@ void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
* Allocate a new idr from drm_device::ctx_idr while holding the
* drm_device::struct_mutex lock.
*/
-static int drm_ctxbitmap_next(struct drm_device * dev)
+static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
{
int ret;
@@ -90,7 +85,7 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
*
* Initialise the drm_device::ctx_idr
*/
-int drm_ctxbitmap_init(struct drm_device * dev)
+int drm_legacy_ctxbitmap_init(struct drm_device * dev)
{
idr_init(&dev->ctx_idr);
return 0;
@@ -104,13 +99,43 @@ int drm_ctxbitmap_init(struct drm_device * dev)
* Free all idr members using drm_ctx_sarea_free helper function
* while holding the drm_device::struct_mutex lock.
*/
-void drm_ctxbitmap_cleanup(struct drm_device * dev)
+void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
{
mutex_lock(&dev->struct_mutex);
idr_destroy(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
}
+/**
+ * drm_ctxbitmap_flush() - Flush all contexts owned by a file
+ * @dev: DRM device to operate on
+ * @file: Open file to flush contexts for
+ *
+ * This iterates over all contexts on @dev and drops them if they're owned by
+ * @file. Note that after this call returns, new contexts might be added if
+ * the file is still alive.
+ */
+void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_ctx_list *pos, *tmp;
+
+ mutex_lock(&dev->ctxlist_mutex);
+
+ list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
+ if (pos->tag == file &&
+ pos->handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor)
+ dev->driver->context_dtor(dev, pos->handle);
+
+ drm_legacy_ctxbitmap_free(dev, pos->handle);
+ list_del(&pos->head);
+ kfree(pos);
+ }
+ }
+
+ mutex_unlock(&dev->ctxlist_mutex);
+}
+
/*@}*/
/******************************************************************/
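
drm_legacy_ctxbitmap_flush() centralises cleanup that drivers previously did by hand: it is intended for the file-release path, so contexts belonging to a client that exits (or crashes) are reclaimed. A hypothetical caller sketch (the exact call site is core release code, not shown in this hunk):

static void foo_preclose(struct drm_device *dev, struct drm_file *file)
{
	/* drop every legacy context this file still owns */
	drm_legacy_ctxbitmap_flush(dev, file);
}
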
@@ -129,8 +154,8 @@ void drm_ctxbitmap_cleanup(struct drm_device * dev)
* Gets the map from drm_device::ctx_idr with the handle specified and
* returns its handle.
*/
-int drm_getsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_getsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_local_map *map;
@@ -173,8 +198,8 @@ int drm_getsareactx(struct drm_device *dev, void *data,
* Searches the mapping specified in \p arg and update the entry in
* drm_device::ctx_idr with it.
*/
-int drm_setsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_setsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_local_map *map = NULL;
@@ -273,8 +298,8 @@ static int drm_context_switch_complete(struct drm_device *dev,
* \param arg user argument pointing to a drm_ctx_res structure.
* \return zero on success or a negative number on failure.
*/
-int drm_resctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_resctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx_res *res = data;
struct drm_ctx ctx;
@@ -304,16 +329,16 @@ int drm_resctx(struct drm_device *dev, void *data,
*
* Get a new handle for the context and copy to userspace.
*/
-int drm_addctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_addctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
- ctx->handle = drm_ctxbitmap_next(dev);
+ ctx->handle = drm_legacy_ctxbitmap_next(dev);
if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
- ctx->handle = drm_ctxbitmap_next(dev);
+ ctx->handle = drm_legacy_ctxbitmap_next(dev);
}
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle == -1) {
@@ -348,7 +373,8 @@ int drm_addctx(struct drm_device *dev, void *data,
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*/
-int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_legacy_getctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
@@ -369,8 +395,8 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
*
* Calls context_switch().
*/
-int drm_switchctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_switchctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
@@ -389,8 +415,8 @@ int drm_switchctx(struct drm_device *dev, void *data,
*
* Calls context_switch_complete().
*/
-int drm_newctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_newctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
@@ -411,8 +437,8 @@ int drm_newctx(struct drm_device *dev, void *data,
*
* If not the special kernel context, calls ctxbitmap_free() to free the specified context.
*/
-int drm_rmctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_rmctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
@@ -420,7 +446,7 @@ int drm_rmctx(struct drm_device *dev, void *data,
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev, ctx->handle);
- drm_ctxbitmap_free(dev, ctx->handle);
+ drm_legacy_ctxbitmap_free(dev, ctx->handle);
}
mutex_lock(&dev->ctxlist_mutex);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 7db6251c38e7..ca8bb1bc92a0 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -41,6 +41,10 @@
#include "drm_crtc_internal.h"
+static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *r,
+ struct drm_file *file_priv);
+
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
@@ -178,6 +182,12 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
};
+static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
+ { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
+ { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
+ { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
+};
+
/*
* Non-global properties, but "required" for certain connectors.
*/
@@ -357,6 +367,32 @@ const char *drm_get_format_name(uint32_t format)
}
EXPORT_SYMBOL(drm_get_format_name);
+/*
+ * Internal function to assign a slot in the object idr and optionally
+ * register the object into the idr.
+ */
+static int drm_mode_object_get_reg(struct drm_device *dev,
+ struct drm_mode_object *obj,
+ uint32_t obj_type,
+ bool register_obj)
+{
+ int ret;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ /*
+ * Set up the object linking under the protection of the idr
+ * lock so that other users can't see inconsistent state.
+ */
+ obj->id = ret;
+ obj->type = obj_type;
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+
/**
* drm_mode_object_get - allocate a new modeset identifier
* @dev: DRM device
@@ -375,21 +411,15 @@ EXPORT_SYMBOL(drm_get_format_name);
int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type)
{
- int ret;
+ return drm_mode_object_get_reg(dev, obj, obj_type, true);
+}
+static void drm_mode_object_register(struct drm_device *dev,
+ struct drm_mode_object *obj)
+{
mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
- if (ret >= 0) {
- /*
- * Set up the object linking under the protection of the idr
- * lock so that other users can't see inconsistent state.
- */
- obj->id = ret;
- obj->type = obj_type;
- }
+ idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
mutex_unlock(&dev->mode_config.idr_mutex);
-
- return ret < 0 ? ret : 0;
}
/**
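
The split between drm_mode_object_get_reg() and drm_mode_object_register() implements a reserve-then-publish pattern: the ID slot is allocated up front with a NULL pointer, so idr_find() cannot return the object yet, and the real pointer is swapped in with idr_replace() only once the object is fully constructed, as drm_connector_register() now does. The pattern in isolation (a sketch, with the same idr locking assumed):

/* reserve an id, but keep the object invisible to lookups */
mutex_lock(&idr_mutex);
id = idr_alloc(&my_idr, NULL, 1, 0, GFP_KERNEL);
mutex_unlock(&idr_mutex);
if (id < 0)
	return id;
obj->id = id;

/* ... finish constructing obj ... */

/* publish: from here on idr_find(&my_idr, obj->id) returns obj */
mutex_lock(&idr_mutex);
idr_replace(&my_idr, obj, obj->id);
mutex_unlock(&idr_mutex);
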
@@ -416,8 +446,12 @@ static struct drm_mode_object *_object_find(struct drm_device *dev,
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
- if (!obj || (type != DRM_MODE_OBJECT_ANY && obj->type != type) ||
- (obj->id != id))
+ if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
+ obj = NULL;
+ if (obj && obj->id != id)
+ obj = NULL;
+ /* don't leak out unref'd fb's */
+ if (obj && (obj->type == DRM_MODE_OBJECT_FB))
obj = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
@@ -444,9 +478,6 @@ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
* function.*/
WARN_ON(type == DRM_MODE_OBJECT_FB);
obj = _object_find(dev, id, type);
- /* don't leak out unref'd fb's */
- if (obj && (obj->type == DRM_MODE_OBJECT_FB))
- obj = NULL;
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
@@ -723,7 +754,7 @@ DEFINE_WW_CLASS(crtc_ww_class);
*/
int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary,
- void *cursor,
+ struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs)
{
struct drm_mode_config *config = &dev->mode_config;
@@ -748,8 +779,11 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
config->num_crtc++;
crtc->primary = primary;
+ crtc->cursor = cursor;
if (primary)
primary->possible_crtcs = 1 << drm_crtc_index(crtc);
+ if (cursor)
+ cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
out:
drm_modeset_unlock_all(dev);
@@ -842,7 +876,7 @@ int drm_connector_init(struct drm_device *dev,
drm_modeset_lock_all(dev);
- ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+ ret = drm_mode_object_get_reg(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR, false);
if (ret)
goto out_unlock;
@@ -935,6 +969,8 @@ int drm_connector_register(struct drm_connector *connector)
{
int ret;
+ drm_mode_object_register(connector->dev, &connector->base);
+
ret = drm_sysfs_connector_add(connector);
if (ret)
return ret;
@@ -1257,6 +1293,7 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
{
struct drm_property *edid;
struct drm_property *dpms;
+ struct drm_property *dev_path;
/*
* Standard properties (apply to all connectors)
@@ -1271,6 +1308,12 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
ARRAY_SIZE(drm_dpms_enum_list));
dev->mode_config.dpms_property = dpms;
+ dev_path = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "PATH", 0);
+ dev->mode_config.path_property = dev_path;
+
return 0;
}
@@ -1427,6 +1470,33 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
/**
+ * drm_mode_create_aspect_ratio_property - create aspect ratio property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
+{
+ if (dev->mode_config.aspect_ratio_property)
+ return 0;
+
+ dev->mode_config.aspect_ratio_property =
+ drm_property_create_enum(dev, 0, "aspect ratio",
+ drm_aspect_ratio_enum_list,
+ ARRAY_SIZE(drm_aspect_ratio_enum_list));
+
+ if (dev->mode_config.aspect_ratio_property == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
+
+/**
* drm_mode_create_dirty_property - create dirty property
* @dev: DRM device
*
@@ -1513,6 +1583,15 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
+void drm_reinit_primary_mode_group(struct drm_device *dev)
+{
+ drm_modeset_lock_all(dev);
+ drm_mode_group_destroy(&dev->primary->mode_group);
+ drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
+ drm_modeset_unlock_all(dev);
+}
+EXPORT_SYMBOL(drm_reinit_primary_mode_group);
+
/**
* drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
* @out: drm_mode_modeinfo struct to return to the user
@@ -2161,45 +2240,32 @@ out:
return ret;
}
-/**
- * drm_mode_setplane - configure a plane's configuration
- * @dev: DRM device
- * @data: ioctl data*
- * @file_priv: DRM file info
+/*
+ * setplane_internal - setplane handler for internal callers
*
- * Set plane configuration, including placement, fb, scaling, and other factors.
- * Or pass a NULL fb to disable.
+ * Note that we assume an extra reference has already been taken on fb. If the
+ * update fails, this reference will be dropped before return; if it succeeds,
+ * the previous framebuffer (if any) will be unreferenced instead.
*
- * Returns:
- * Zero on success, errno on failure.
+ * src_{x,y,w,h} are provided in 16.16 fixed point format
*/
-int drm_mode_setplane(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int setplane_internal(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ /* src_{x,y,w,h} values are 16.16 fixed point */
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
{
- struct drm_mode_set_plane *plane_req = data;
- struct drm_plane *plane;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+ struct drm_device *dev = plane->dev;
+ struct drm_framebuffer *old_fb = NULL;
int ret = 0;
unsigned int fb_width, fb_height;
int i;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- /*
- * First, find the plane, crtc, and fb objects. If not available,
- * we don't bother to call the driver.
- */
- plane = drm_plane_find(dev, plane_req->plane_id);
- if (!plane) {
- DRM_DEBUG_KMS("Unknown plane ID %d\n",
- plane_req->plane_id);
- return -ENOENT;
- }
-
/* No fb means shut it down */
- if (!plane_req->fb_id) {
+ if (!fb) {
drm_modeset_lock_all(dev);
old_fb = plane->fb;
ret = plane->funcs->disable_plane(plane);
@@ -2213,14 +2279,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
goto out;
}
- crtc = drm_crtc_find(dev, plane_req->crtc_id);
- if (!crtc) {
- DRM_DEBUG_KMS("Unknown crtc ID %d\n",
- plane_req->crtc_id);
- ret = -ENOENT;
- goto out;
- }
-
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
DRM_DEBUG_KMS("Invalid crtc for plane\n");
@@ -2228,14 +2286,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
goto out;
}
- fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
- if (!fb) {
- DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
- plane_req->fb_id);
- ret = -ENOENT;
- goto out;
- }
-
/* Check whether this plane supports the fb pixel format. */
for (i = 0; i < plane->format_count; i++)
if (fb->pixel_format == plane->format_types[i])
@@ -2251,43 +2301,25 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
fb_height = fb->height << 16;
/* Make sure source coordinates are inside the fb. */
- if (plane_req->src_w > fb_width ||
- plane_req->src_x > fb_width - plane_req->src_w ||
- plane_req->src_h > fb_height ||
- plane_req->src_y > fb_height - plane_req->src_h) {
+ if (src_w > fb_width ||
+ src_x > fb_width - src_w ||
+ src_h > fb_height ||
+ src_y > fb_height - src_h) {
DRM_DEBUG_KMS("Invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
- plane_req->src_w >> 16,
- ((plane_req->src_w & 0xffff) * 15625) >> 10,
- plane_req->src_h >> 16,
- ((plane_req->src_h & 0xffff) * 15625) >> 10,
- plane_req->src_x >> 16,
- ((plane_req->src_x & 0xffff) * 15625) >> 10,
- plane_req->src_y >> 16,
- ((plane_req->src_y & 0xffff) * 15625) >> 10);
+ src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+ src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+ src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+ src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
ret = -ENOSPC;
goto out;
}
- /* Give drivers some help against integer overflows */
- if (plane_req->crtc_w > INT_MAX ||
- plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
- plane_req->crtc_h > INT_MAX ||
- plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
- DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
- plane_req->crtc_w, plane_req->crtc_h,
- plane_req->crtc_x, plane_req->crtc_y);
- ret = -ERANGE;
- goto out;
- }
-
drm_modeset_lock_all(dev);
old_fb = plane->fb;
ret = plane->funcs->update_plane(plane, crtc, fb,
- plane_req->crtc_x, plane_req->crtc_y,
- plane_req->crtc_w, plane_req->crtc_h,
- plane_req->src_x, plane_req->src_y,
- plane_req->src_w, plane_req->src_h);
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
if (!ret) {
plane->crtc = crtc;
plane->fb = fb;
@@ -2304,6 +2336,85 @@ out:
drm_framebuffer_unreference(old_fb);
return ret;
+
+}
+
+/**
+ * drm_mode_setplane - configure a plane's configuration
+ * @dev: DRM device
+ * @data: ioctl data*
+ * @file_priv: DRM file info
+ *
+ * Set plane configuration, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable (planes may be disabled without providing a
+ * valid crtc).
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_set_plane *plane_req = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc = NULL;
+ struct drm_framebuffer *fb = NULL;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ /* Give drivers some help against integer overflows */
+ if (plane_req->crtc_w > INT_MAX ||
+ plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+ plane_req->crtc_h > INT_MAX ||
+ plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->crtc_x, plane_req->crtc_y);
+ return -ERANGE;
+ }
+
+ /*
+ * First, find the plane, crtc, and fb objects. If not available,
+ * we don't bother to call the driver.
+ */
+ obj = drm_mode_object_find(dev, plane_req->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ plane_req->plane_id);
+ return -ENOENT;
+ }
+ plane = obj_to_plane(obj);
+
+ if (plane_req->fb_id) {
+ fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+ if (!fb) {
+ DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+ plane_req->fb_id);
+ return -ENOENT;
+ }
+
+ obj = drm_mode_object_find(dev, plane_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ return -ENOENT;
+ }
+ crtc = obj_to_crtc(obj);
+ }
+
+ /*
+ * setplane_internal will take care of deref'ing either the old or new
+ * framebuffer depending on success.
+ */
+ return setplane_internal(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
}
/**
@@ -2552,6 +2663,102 @@ out:
return ret;
}
+/**
+ * drm_mode_cursor_universal - translate legacy cursor ioctl call into a
+ * universal plane handler call
+ * @crtc: crtc to update cursor for
+ * @req: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Legacy cursor ioctl's work directly with driver buffer handles. To
+ * translate legacy ioctl calls into universal plane handler calls, we need to
+ * wrap the native buffer handle in a drm_framebuffer.
+ *
+ * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB
+ * buffer with a pitch of 4*width; the universal plane interface should be used
+ * directly in cases where the hardware can support other buffer settings and
+ * userspace wants to make use of these capabilities.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+static int drm_mode_cursor_universal(struct drm_crtc *crtc,
+ struct drm_mode_cursor2 *req,
+ struct drm_file *file_priv)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_mode_fb_cmd2 fbreq = {
+ .width = req->width,
+ .height = req->height,
+ .pixel_format = DRM_FORMAT_ARGB8888,
+ .pitches = { req->width * 4 },
+ .handles = { req->handle },
+ };
+ int32_t crtc_x, crtc_y;
+ uint32_t crtc_w = 0, crtc_h = 0;
+ uint32_t src_w = 0, src_h = 0;
+ int ret = 0;
+
+ BUG_ON(!crtc->cursor);
+
+ /*
+ * Obtain fb we'll be using (either new or existing) and take an extra
+ * reference to it if fb != null. setplane will take care of dropping
+ * the reference if the plane update fails.
+ */
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (req->handle) {
+ fb = add_framebuffer_internal(dev, &fbreq, file_priv);
+ if (IS_ERR(fb)) {
+ DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
+ return PTR_ERR(fb);
+ }
+
+ drm_framebuffer_reference(fb);
+ } else {
+ fb = NULL;
+ }
+ } else {
+ mutex_lock(&dev->mode_config.mutex);
+ fb = crtc->cursor->fb;
+ if (fb)
+ drm_framebuffer_reference(fb);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
+
+ if (req->flags & DRM_MODE_CURSOR_MOVE) {
+ crtc_x = req->x;
+ crtc_y = req->y;
+ } else {
+ crtc_x = crtc->cursor_x;
+ crtc_y = crtc->cursor_y;
+ }
+
+ if (fb) {
+ crtc_w = fb->width;
+ crtc_h = fb->height;
+ src_w = fb->width << 16;
+ src_h = fb->height << 16;
+ }
+
+ /*
+ * setplane_internal will take care of deref'ing either the old or new
+ * framebuffer depending on success.
+ */
+ ret = setplane_internal(crtc->cursor, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ 0, 0, src_w, src_h);
+
+ /* Update successful; save new cursor position, if necessary */
+ if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
+ crtc->cursor_x = req->x;
+ crtc->cursor_y = req->y;
+ }
+
+ return ret;
+}
+
static int drm_mode_cursor_common(struct drm_device *dev,
struct drm_mode_cursor2 *req,
struct drm_file *file_priv)
@@ -2571,6 +2778,13 @@ static int drm_mode_cursor_common(struct drm_device *dev,
return -ENOENT;
}
+ /*
+ * If this crtc has a universal cursor plane, call that plane's update
+ * handler rather than using legacy cursor handlers.
+ */
+ if (crtc->cursor)
+ return drm_mode_cursor_universal(crtc, req, file_priv);
+
drm_modeset_lock(&crtc->mutex, NULL);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
@@ -2870,56 +3084,38 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
return 0;
}
-/**
- * drm_mode_addfb2 - add an FB to the graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Add a new FB to the specified CRTC, given a user request with format. This is
- * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
- * and uses fourcc codes as pixel format specifiers.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, errno on failure.
- */
-int drm_mode_addfb2(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *r,
+ struct drm_file *file_priv)
{
- struct drm_mode_fb_cmd2 *r = data;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
if (r->flags & ~DRM_MODE_FB_INTERLACED) {
DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
r->height, config->min_height, config->max_height);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
ret = framebuffer_check(r);
if (ret)
- return ret;
+ return ERR_PTR(ret);
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
- return PTR_ERR(fb);
+ return fb;
}
mutex_lock(&file_priv->fbs_lock);
@@ -2928,8 +3124,37 @@ int drm_mode_addfb2(struct drm_device *dev,
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
mutex_unlock(&file_priv->fbs_lock);
+ return fb;
+}
- return ret;
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request with format. This is
+ * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
+ * and uses fourcc codes as pixel format specifiers.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_framebuffer *fb;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = add_framebuffer_internal(dev, data, file_priv);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
+ return 0;
}
/**
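
add_framebuffer_internal() now returns the framebuffer itself (or an ERR_PTR-encoded errno) rather than an int, so both the addfb2 ioctl and the new cursor path can share it. The ERR_PTR convention it relies on, sketched from the caller's side:

struct drm_framebuffer *fb;

fb = add_framebuffer_internal(dev, data, file_priv);
if (IS_ERR(fb))			/* the pointer encodes a negative errno */
	return PTR_ERR(fb);	/* decode it back to an int */
/* otherwise fb points at a valid, registered framebuffer */
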
@@ -3019,7 +3244,7 @@ int drm_mode_getfb(struct drm_device *dev,
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
if (fb->funcs->create_handle) {
- if (file_priv->is_master || capable(CAP_SYS_ADMIN) ||
+ if (drm_is_master(file_priv) || capable(CAP_SYS_ADMIN) ||
drm_is_control_client(file_priv)) {
ret = fb->funcs->create_handle(fb, file_priv,
&r->handle);
@@ -3219,7 +3444,7 @@ fail:
EXPORT_SYMBOL(drm_property_create);
/**
- * drm_property_create - create a new enumeration property type
+ * drm_property_create_enum - create a new enumeration property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
@@ -3265,7 +3490,7 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
EXPORT_SYMBOL(drm_property_create_enum);
/**
- * drm_property_create - create a new bitmask property type
+ * drm_property_create_bitmask - create a new bitmask property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
@@ -3285,19 +3510,28 @@ EXPORT_SYMBOL(drm_property_create_enum);
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
- int num_values)
+ int num_props,
+ uint64_t supported_bits)
{
struct drm_property *property;
- int i, ret;
+ int i, ret, index = 0;
+ int num_values = hweight64(supported_bits);
flags |= DRM_MODE_PROP_BITMASK;
property = drm_property_create(dev, flags, name, num_values);
if (!property)
return NULL;
+ for (i = 0; i < num_props; i++) {
+ if (!(supported_bits & (1ULL << props[i].type)))
+ continue;
- for (i = 0; i < num_values; i++) {
- ret = drm_property_add_enum(property, i,
+ if (WARN_ON(index >= num_values)) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+
+ ret = drm_property_add_enum(property, index++,
props[i].type,
props[i].name);
if (ret) {
@@ -3327,7 +3561,7 @@ static struct drm_property *property_create_range(struct drm_device *dev,
}
/**
- * drm_property_create - create a new ranged property type
+ * drm_property_create_range - create a new ranged property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
@@ -3746,6 +3980,25 @@ done:
return ret;
}
+int drm_mode_connector_set_path_property(struct drm_connector *connector,
+ char *path)
+{
+ struct drm_device *dev = connector->dev;
+ int ret, size;
+ size = strlen(path) + 1;
+
+ connector->path_blob_ptr = drm_property_create_blob(connector->dev,
+ size, path);
+ if (!connector->path_blob_ptr)
+ return -EINVAL;
+
+ ret = drm_object_property_set_value(&connector->base,
+ dev->mode_config.path_property,
+ connector->path_blob_ptr->base.id);
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_path_property);
+
/**
* drm_mode_connector_update_edid_property - update the edid property of a connector
* @connector: drm connector
@@ -4727,6 +4980,36 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
/**
+ * drm_rotation_simplify() - Try to simplify the rotation
+ * @rotation: Rotation to be simplified
+ * @supported_rotations: Supported rotations
+ *
+ * Attempt to simplify the rotation to a form that is supported.
+ * E.g. if the hardware supports everything except DRM_REFLECT_X
+ * one could call this function like this:
+ *
+ * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) |
+ * BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) |
+ * BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y));
+ *
+ * to eliminate the DRM_REFLECT_X flag. Depending on what kind of
+ * transforms the hardware supports, this function may not
+ * be able to produce a supported transform, so the caller should
+ * check the result afterwards.
+ */
+unsigned int drm_rotation_simplify(unsigned int rotation,
+ unsigned int supported_rotations)
+{
+ if (rotation & ~supported_rotations) {
+ rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
+ rotation = (rotation & ~0xf) | BIT((ffs(rotation & 0xf) + 1) % 4);
+ }
+
+ return rotation;
+}
+EXPORT_SYMBOL(drm_rotation_simplify);
+
+/**
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
@@ -4844,3 +5127,21 @@ void drm_mode_config_cleanup(struct drm_device *dev)
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+ unsigned int supported_rotations)
+{
+ static const struct drm_prop_enum_list props[] = {
+ { DRM_ROTATE_0, "rotate-0" },
+ { DRM_ROTATE_90, "rotate-90" },
+ { DRM_ROTATE_180, "rotate-180" },
+ { DRM_ROTATE_270, "rotate-270" },
+ { DRM_REFLECT_X, "reflect-x" },
+ { DRM_REFLECT_Y, "reflect-y" },
+ };
+
+ return drm_property_create_bitmask(dev, 0, "rotation",
+ props, ARRAY_SIZE(props),
+ supported_rotations);
+}
+EXPORT_SYMBOL(drm_mode_create_rotation_property);
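
drm_mode_create_rotation_property() exercises the reworked drm_property_create_bitmask(): supported_rotations is the new supported_bits mask, so only the enum entries whose bit is set are added to the property. drm_rotation_simplify() in turn relies on the identity that reflecting about both axes equals rotating 180 degrees. A worked example (DRM_ROTATE_0..270 are bits 0-3, DRM_REFLECT_X/Y bits 4-5):

unsigned int rot = BIT(DRM_ROTATE_90) | BIT(DRM_REFLECT_X);
unsigned int supported = BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_90) |
			 BIT(DRM_ROTATE_180) | BIT(DRM_ROTATE_270) |
			 BIT(DRM_REFLECT_Y);	/* no reflect-x in hardware */

rot = drm_rotation_simplify(rot, supported);
/*
 * Step 1 toggles both reflections: rotate-90 | reflect-y.
 * Step 2 adds 180 degrees: ffs(BIT(1)) == 2, BIT((2 + 1) % 4) == BIT(3),
 * so rot is now BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y) -- an equivalent
 * transform built only from supported bits.
 */
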
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
new file mode 100644
index 000000000000..ac3c2738db94
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -0,0 +1,2715 @@
+/*
+ * Copyright © 2014 Red Hat
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/i2c.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drmP.h>
+
+#include <drm/drm_fixed.h>
+
+/**
+ * DOC: dp mst helper
+ *
+ * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
+ * protocol. The helpers contain a topology manager and bandwidth manager.
+ * The helpers encapsulate the sending and receiving of sideband msgs.
+ */
+static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
+ char *buf);
+static int test_calc_pbn_mode(void);
+
+static void drm_dp_put_port(struct drm_dp_mst_port *port);
+
+static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
+ int id,
+ struct drm_dp_payload *payload);
+
+static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size, u8 *bytes);
+
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb);
+static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port);
+static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
+ u8 *guid);
+
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
+static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
+/* sideband msg handling */
+static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
+{
+ u8 bitmask = 0x80;
+ u8 bitshift = 7;
+ u8 array_index = 0;
+ int number_of_bits = num_nibbles * 4;
+ u8 remainder = 0;
+
+ while (number_of_bits != 0) {
+ number_of_bits--;
+ remainder <<= 1;
+ remainder |= (data[array_index] & bitmask) >> bitshift;
+ bitmask >>= 1;
+ bitshift--;
+ if (bitmask == 0) {
+ bitmask = 0x80;
+ bitshift = 7;
+ array_index++;
+ }
+ if ((remainder & 0x10) == 0x10)
+ remainder ^= 0x13;
+ }
+
+ number_of_bits = 4;
+ while (number_of_bits != 0) {
+ number_of_bits--;
+ remainder <<= 1;
+ if ((remainder & 0x10) != 0)
+ remainder ^= 0x13;
+ }
+
+ return remainder;
+}
+
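
drm_dp_msg_header_crc4() above is a bit-serial CRC-4 with polynomial x^4 + x + 1 (the 0x13 reduction), run over the header nibbles MSB-first and then augmented with four zero bits. A compact standalone re-implementation, useful for checking test vectors (an illustrative sketch, not the kernel code):

static u8 sb_hdr_crc4(const u8 *data, int num_nibbles)
{
	int bits = num_nibbles * 4;
	unsigned int rem = 0;
	int i;

	/* feed message bits MSB-first, then 4 zero augmentation bits */
	for (i = 0; i < bits + 4; i++) {
		rem <<= 1;
		if (i < bits)
			rem |= (data[i / 8] >> (7 - (i % 8))) & 1;
		if (rem & 0x10)
			rem ^= 0x13;	/* reduce mod x^4 + x + 1 */
	}
	return rem & 0xf;
}
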
+static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
+{
+ u8 bitmask = 0x80;
+ u8 bitshift = 7;
+ u8 array_index = 0;
+ int number_of_bits = number_of_bytes * 8;
+ u16 remainder = 0;
+
+ while (number_of_bits != 0) {
+ number_of_bits--;
+ remainder <<= 1;
+ remainder |= (data[array_index] & bitmask) >> bitshift;
+ bitmask >>= 1;
+ bitshift--;
+ if (bitmask == 0) {
+ bitmask = 0x80;
+ bitshift = 7;
+ array_index++;
+ }
+ if ((remainder & 0x100) == 0x100)
+ remainder ^= 0xd5;
+ }
+
+ number_of_bits = 8;
+ while (number_of_bits != 0) {
+ number_of_bits--;
+ remainder <<= 1;
+ if ((remainder & 0x100) != 0)
+ remainder ^= 0xd5;
+ }
+
+ return remainder & 0xff;
+}
+static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
+{
+ u8 size = 3;
+ size += (hdr->lct / 2);
+ return size;
+}
+
+static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
+ u8 *buf, int *len)
+{
+ int idx = 0;
+ int i;
+ u8 crc4;
+ buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
+ for (i = 0; i < (hdr->lct / 2); i++)
+ buf[idx++] = hdr->rad[i];
+ buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
+ (hdr->msg_len & 0x3f);
+ buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
+
+ crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
+ buf[idx - 1] |= (crc4 & 0xf);
+
+ *len = idx;
+}
+
+static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
+ u8 *buf, int buflen, u8 *hdrlen)
+{
+ u8 crc4;
+ u8 len;
+ int i;
+ u8 idx;
+ if (buf[0] == 0)
+ return false;
+ len = 3;
+ len += ((buf[0] & 0xf0) >> 4) / 2;
+ if (len > buflen)
+ return false;
+ crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
+
+ if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
+ DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
+ return false;
+ }
+
+ hdr->lct = (buf[0] & 0xf0) >> 4;
+ hdr->lcr = (buf[0] & 0xf);
+ idx = 1;
+ for (i = 0; i < (hdr->lct / 2); i++)
+ hdr->rad[i] = buf[idx++];
+ hdr->broadcast = (buf[idx] >> 7) & 0x1;
+ hdr->path_msg = (buf[idx] >> 6) & 0x1;
+ hdr->msg_len = buf[idx] & 0x3f;
+ idx++;
+ hdr->somt = (buf[idx] >> 7) & 0x1;
+ hdr->eomt = (buf[idx] >> 6) & 0x1;
+ hdr->seqno = (buf[idx] >> 4) & 0x1;
+ idx++;
+ *hdrlen = idx;
+ return true;
+}
+
+static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
+ struct drm_dp_sideband_msg_tx *raw)
+{
+ int idx = 0;
+ int i;
+ u8 *buf = raw->msg;
+ buf[idx++] = req->req_type & 0x7f;
+
+ switch (req->req_type) {
+ case DP_ENUM_PATH_RESOURCES:
+ buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
+ idx++;
+ break;
+ case DP_ALLOCATE_PAYLOAD:
+ buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
+ (req->u.allocate_payload.number_sdp_streams & 0xf);
+ idx++;
+ buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
+ idx++;
+ buf[idx] = (req->u.allocate_payload.pbn >> 8);
+ idx++;
+ buf[idx] = (req->u.allocate_payload.pbn & 0xff);
+ idx++;
+ for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
+ buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
+ (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
+ idx++;
+ }
+ if (req->u.allocate_payload.number_sdp_streams & 1) {
+ i = req->u.allocate_payload.number_sdp_streams - 1;
+ buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
+ idx++;
+ }
+ break;
+ case DP_QUERY_PAYLOAD:
+ buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
+ idx++;
+ buf[idx] = (req->u.query_payload.vcpi & 0x7f);
+ idx++;
+ break;
+ case DP_REMOTE_DPCD_READ:
+ buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
+ buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
+ idx++;
+ buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
+ idx++;
+ buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
+ idx++;
+ buf[idx] = (req->u.dpcd_read.num_bytes);
+ idx++;
+ break;
+
+ case DP_REMOTE_DPCD_WRITE:
+ buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
+ buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
+ idx++;
+ buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
+ idx++;
+ buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
+ idx++;
+ buf[idx] = (req->u.dpcd_write.num_bytes);
+ idx++;
+ memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
+ idx += req->u.dpcd_write.num_bytes;
+ break;
+ case DP_REMOTE_I2C_READ:
+ buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
+ buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
+ idx++;
+ for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
+ buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
+ idx++;
+ buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
+ idx++;
+ memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
+ idx += req->u.i2c_read.transactions[i].num_bytes;
+
+ buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+ buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
+ idx++;
+ }
+ buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
+ idx++;
+ buf[idx] = (req->u.i2c_read.num_bytes_read);
+ idx++;
+ break;
+
+ case DP_REMOTE_I2C_WRITE:
+ buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
+ idx++;
+ buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
+ idx++;
+ buf[idx] = (req->u.i2c_write.num_bytes);
+ idx++;
+ memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
+ idx += req->u.i2c_write.num_bytes;
+ break;
+ }
+ raw->cur_len = idx;
+}
+
+static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
+{
+ u8 crc4;
+ crc4 = drm_dp_msg_data_crc4(msg, len);
+ msg[len] = crc4;
+}
+
+static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
+ struct drm_dp_sideband_msg_tx *raw)
+{
+ int idx = 0;
+ u8 *buf = raw->msg;
+
+ buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
+
+ raw->cur_len = idx;
+}
+
+/*
+ * this adds a chunk of msg to the builder to get the final msg:
+ * SOMT marks the first chunk of a message, EOMT the last, and the
+ * trailing per-chunk CRC byte is dropped when the chunk is folded
+ * into the full message.
+ */
+static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
+ u8 *replybuf, u8 replybuflen, bool hdr)
+{
+ int ret;
+ u8 crc4;
+
+ if (hdr) {
+ u8 hdrlen;
+ struct drm_dp_sideband_msg_hdr recv_hdr;
+ ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
+ if (ret == false) {
+ print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
+ return false;
+ }
+
+ /* get length contained in this portion */
+ msg->curchunk_len = recv_hdr.msg_len;
+ msg->curchunk_hdrlen = hdrlen;
+
+ /* we have already gotten an somt - don't bother parsing */
+ if (recv_hdr.somt && msg->have_somt)
+ return false;
+
+ if (recv_hdr.somt) {
+ memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
+ msg->have_somt = true;
+ }
+ if (recv_hdr.eomt)
+ msg->have_eomt = true;
+
+ /* copy the bytes for the remainder of this header chunk */
+ msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
+ memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
+ } else {
+ memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
+ msg->curchunk_idx += replybuflen;
+ }
+
+ if (msg->curchunk_idx >= msg->curchunk_len) {
+ /* check the CRC byte that trails the chunk data */
+ crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
+ if (crc4 != msg->chunk[msg->curchunk_len - 1])
+ DRM_DEBUG_KMS("chunk crc mismatch 0x%x 0x%x\n", crc4,
+ msg->chunk[msg->curchunk_len - 1]);
+ /* copy chunk into bigger msg */
+ memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
+ msg->curlen += msg->curchunk_len - 1;
+ }
+ return true;
+}
+
+static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ int i;
+ memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
+ idx += 16;
+ repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ for (i = 0; i < repmsg->u.link_addr.nports; i++) {
+ if (raw->msg[idx] & 0x80)
+ repmsg->u.link_addr.ports[i].input_port = 1;
+
+ repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
+ repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
+
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
+ repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
+ if (repmsg->u.link_addr.ports[i].input_port == 0)
+ repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ if (repmsg->u.link_addr.ports[i].input_port == 0) {
+ repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
+ idx += 16;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
+ repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
+ idx++;
+
+ }
+ if (idx > raw->curlen)
+ goto fail_len;
+ }
+
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+
+ repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
+ idx++;
+ /* TODO check */
+ memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
+ idx += 2;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
+ idx += 2;
+ if (idx > raw->curlen)
+ goto fail_len;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.allocate_payload.vcpi = raw->msg[idx];
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
+ idx += 2;
+ if (idx > raw->curlen)
+ goto fail_len;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
+ idx += 2;
+ if (idx > raw->curlen)
+ goto fail_len;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *msg)
+{
+ memset(msg, 0, sizeof(*msg));
+ msg->reply_type = (raw->msg[0] & 0x80) >> 7;
+ msg->req_type = (raw->msg[0] & 0x7f);
+
+ if (msg->reply_type) {
+ memcpy(msg->u.nak.guid, &raw->msg[1], 16);
+ msg->u.nak.reason = raw->msg[17];
+ msg->u.nak.nak_data = raw->msg[18];
+ return false;
+ }
+
+ switch (msg->req_type) {
+ case DP_LINK_ADDRESS:
+ return drm_dp_sideband_parse_link_address(raw, msg);
+ case DP_QUERY_PAYLOAD:
+ return drm_dp_sideband_parse_query_payload_ack(raw, msg);
+ case DP_REMOTE_DPCD_READ:
+ return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
+ case DP_REMOTE_DPCD_WRITE:
+ return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
+ case DP_REMOTE_I2C_READ:
+ return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
+ case DP_ENUM_PATH_RESOURCES:
+ return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
+ case DP_ALLOCATE_PAYLOAD:
+ return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
+ default:
+ DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
+ return false;
+ }
+}
+
+static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_req_body *msg)
+{
+ int idx = 1;
+
+ msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
+ idx += 16;
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
+ msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
+ msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
+ msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
+ msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
+ idx++;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_req_body *msg)
+{
+ int idx = 1;
+
+ msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
+ idx += 16;
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
+ idx++;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_req_body *msg)
+{
+ memset(msg, 0, sizeof(*msg));
+ msg->req_type = (raw->msg[0] & 0x7f);
+
+ switch (msg->req_type) {
+ case DP_CONNECTION_STATUS_NOTIFY:
+ return drm_dp_sideband_parse_connection_status_notify(raw, msg);
+ case DP_RESOURCE_STATUS_NOTIFY:
+ return drm_dp_sideband_parse_resource_status_notify(raw, msg);
+ default:
+ DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
+ return false;
+ }
+}
+
+static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_REMOTE_DPCD_WRITE;
+ req.u.dpcd_write.port_number = port_num;
+ req.u.dpcd_write.dpcd_address = offset;
+ req.u.dpcd_write.num_bytes = num_bytes;
+ req.u.dpcd_write.bytes = bytes;
+ drm_dp_encode_sideband_req(&req, msg);
+
+ return 0;
+}
+
+static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_LINK_ADDRESS;
+ drm_dp_encode_sideband_req(&req, msg);
+ return 0;
+}
+
+static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_ENUM_PATH_RESOURCES;
+ req.u.port_num.port_number = port_num;
+ drm_dp_encode_sideband_req(&req, msg);
+ msg->path_msg = true;
+ return 0;
+}
+
+static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
+ u8 vcpi, uint16_t pbn)
+{
+ struct drm_dp_sideband_msg_req_body req;
+ memset(&req, 0, sizeof(req));
+ req.req_type = DP_ALLOCATE_PAYLOAD;
+ req.u.allocate_payload.port_number = port_num;
+ req.u.allocate_payload.vcpi = vcpi;
+ req.u.allocate_payload.pbn = pbn;
+ drm_dp_encode_sideband_req(&req, msg);
+ msg->path_msg = true;
+ return 0;
+}
+
+static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_vcpi *vcpi)
+{
+ int ret;
+
+ mutex_lock(&mgr->payload_lock);
+ ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
+ if (ret > mgr->max_payloads) {
+ ret = -EINVAL;
+ DRM_DEBUG_KMS("out of payload ids %d\n", ret);
+ goto out_unlock;
+ }
+
+ set_bit(ret, &mgr->payload_mask);
+ vcpi->vcpi = ret;
+ mgr->proposed_vcpis[ret - 1] = vcpi;
+out_unlock:
+ mutex_unlock(&mgr->payload_lock);
+ return ret;
+}
+
+static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
+ int id)
+{
+ if (id == 0)
+ return;
+
+ mutex_lock(&mgr->payload_lock);
+ DRM_DEBUG_KMS("putting payload %d\n", id);
+ clear_bit(id, &mgr->payload_mask);
+ mgr->proposed_vcpis[id - 1] = NULL;
+ mutex_unlock(&mgr->payload_lock);
+}
+
+static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_tx *txmsg)
+{
+ bool ret;
+ mutex_lock(&mgr->qlock);
+ ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
+ txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
+ mutex_unlock(&mgr->qlock);
+ return ret;
+}
+
+static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_sideband_msg_tx *txmsg)
+{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+ int ret;
+
+ ret = wait_event_timeout(mgr->tx_waitq,
+ check_txmsg_state(mgr, txmsg),
+ (4 * HZ));
+ mutex_lock(&mstb->mgr->qlock);
+ if (ret > 0) {
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
+ ret = -EIO;
+ goto out;
+ }
+ } else {
+ DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
+
+ /* dump some state */
+ ret = -EIO;
+
+ /* remove from q */
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
+ txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
+ list_del(&txmsg->next);
+ }
+
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
+ txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
+ mstb->tx_slots[txmsg->seqno] = NULL;
+ }
+ }
+out:
+ mutex_unlock(&mgr->qlock);
+
+ return ret;
+}
+
+static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
+{
+ struct drm_dp_mst_branch *mstb;
+
+ mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
+ if (!mstb)
+ return NULL;
+
+ mstb->lct = lct;
+ if (lct > 1)
+ memcpy(mstb->rad, rad, lct / 2);
+ INIT_LIST_HEAD(&mstb->ports);
+ kref_init(&mstb->kref);
+ return mstb;
+}
+
+static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+{
+ struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+ struct drm_dp_mst_port *port, *tmp;
+ bool wake_tx = false;
+
+ cancel_work_sync(&mstb->mgr->work);
+
+ /*
+ * destroy all ports - don't need lock
+ * as there are no more references to the mst branch
+ * device at this point.
+ */
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ list_del(&port->next);
+ drm_dp_put_port(port);
+ }
+
+ /* drop any tx slots msg */
+ mutex_lock(&mstb->mgr->qlock);
+ if (mstb->tx_slots[0]) {
+ mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[0] = NULL;
+ wake_tx = true;
+ }
+ if (mstb->tx_slots[1]) {
+ mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[1] = NULL;
+ wake_tx = true;
+ }
+ mutex_unlock(&mstb->mgr->qlock);
+
+ if (wake_tx)
+ wake_up(&mstb->mgr->tx_waitq);
+ kfree(mstb);
+}
+
+static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
+{
+ kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
+}
+
+
+static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
+{
+ switch (old_pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ /* remove i2c over sideband */
+ drm_dp_mst_unregister_i2c_bus(&port->aux);
+ break;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ drm_dp_put_mst_branch_device(port->mstb);
+ port->mstb = NULL;
+ break;
+ }
+}
+
+static void drm_dp_destroy_port(struct kref *kref)
+{
+ struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ if (!port->input) {
+ port->vcpi.num_slots = 0;
+ if (port->connector)
+ (*port->mgr->cbs->destroy_connector)(mgr, port->connector);
+ drm_dp_port_teardown_pdt(port, port->pdt);
+
+ if (!port->input && port->vcpi.vcpi > 0)
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ }
+ kfree(port);
+
+ (*mgr->cbs->hotplug)(mgr);
+}
+
+static void drm_dp_put_port(struct drm_dp_mst_port *port)
+{
+ kref_put(&port->kref, drm_dp_destroy_port);
+}
+
+static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
+{
+ struct drm_dp_mst_port *port;
+ struct drm_dp_mst_branch *rmstb;
+ if (to_find == mstb) {
+ kref_get(&mstb->kref);
+ return mstb;
+ }
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->mstb) {
+ rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
+ if (rmstb)
+ return rmstb;
+ }
+ }
+ return NULL;
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_branch *rmstb = NULL;
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary)
+ rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
+ mutex_unlock(&mgr->lock);
+ return rmstb;
+}
+
+static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
+{
+ struct drm_dp_mst_port *port, *mport;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port == to_find) {
+ kref_get(&port->kref);
+ return port;
+ }
+ if (port->mstb) {
+ mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
+ if (mport)
+ return mport;
+ }
+ }
+ return NULL;
+}
+
+static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_port *rport = NULL;
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary)
+ rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
+ mutex_unlock(&mgr->lock);
+ return rport;
+}
+
+static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
+{
+ struct drm_dp_mst_port *port;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->port_num == port_num) {
+ kref_get(&port->kref);
+ return port;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * calculate a new RAD for this MST branch device.
+ * if the parent has an LCT of 2 then it has 1 nibble of RAD,
+ * if the parent has an LCT of 3 then it has 2 nibbles of RAD,
+ * and so on - each extra hop adds one more nibble.
+ */
+static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
+ u8 *rad)
+{
+ int lct = port->parent->lct;
+ int shift = 4;
+ /* the new hop is nibble lct - 1 (0-based), i.e. byte (lct - 1) / 2 */
+ int idx = (lct - 1) / 2;
+ if (lct > 1) {
+ memcpy(rad, port->parent->rad, idx + 1);
+ shift = (lct % 2) ? 4 : 0;
+ } else
+ rad[0] = 0;
+
+ rad[idx] |= port->port_num << shift;
+ return lct + 1;
+}
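+
+/*
+ * Worked example of the above: a port with port_num 1 hanging off a branch
+ * at lct 2 whose rad[0] is 0x30 (first hop through port 3) gives the new
+ * branch lct 3 and rad[0] = 0x31 - port 3, then port 1.
+ */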
+
+/*
+ * returns true if a link address probe needs to be sent for the new mstb
+ */
+static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
+{
+ int ret;
+ u8 rad[6], lct;
+ bool send_link = false;
+ switch (port->pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ /* add i2c over sideband */
+ ret = drm_dp_mst_register_i2c_bus(&port->aux);
+ break;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ lct = drm_dp_calculate_rad(port, rad);
+
+ port->mstb = drm_dp_add_mst_branch_device(lct, rad);
+ if (port->mstb) {
+ port->mstb->mgr = port->mgr;
+ port->mstb->port_parent = port;
+ send_link = true;
+ }
+ break;
+ }
+ return send_link;
+}
+
+static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
+{
+ if (port->dpcd_rev >= 0x12) {
+ port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
+ if (!port->guid_valid) {
+ drm_dp_send_dpcd_write(mstb->mgr,
+ port,
+ DP_GUID,
+ 16, port->guid);
+ port->guid_valid = true;
+ }
+ }
+}
+
+static void build_mst_prop_path(struct drm_dp_mst_port *port,
+ struct drm_dp_mst_branch *mstb,
+ char *proppath)
+{
+ int i;
+ char temp[8];
+ snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
+ for (i = 0; i < (mstb->lct - 1); i++) {
+ int shift = (i % 2) ? 0 : 4;
+ int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
+ snprintf(temp, sizeof(temp), "-%d", port_num);
+ strlcat(proppath, temp, 255);
+ }
+ snprintf(temp, sizeof(temp), "-%d", port->port_num);
+ strlcat(proppath, temp, 255);
+}
+
+static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+ struct device *dev,
+ struct drm_dp_link_addr_reply_port *port_msg)
+{
+ struct drm_dp_mst_port *port;
+ bool ret;
+ bool created = false;
+ int old_pdt = 0;
+ int old_ddps = 0;
+ port = drm_dp_get_port(mstb, port_msg->port_number);
+ if (!port) {
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return;
+ kref_init(&port->kref);
+ port->parent = mstb;
+ port->port_num = port_msg->port_number;
+ port->mgr = mstb->mgr;
+ port->aux.name = "DPMST";
+ port->aux.dev = dev;
+ created = true;
+ } else {
+ old_pdt = port->pdt;
+ old_ddps = port->ddps;
+ }
+
+ port->pdt = port_msg->peer_device_type;
+ port->input = port_msg->input_port;
+ port->mcs = port_msg->mcs;
+ port->ddps = port_msg->ddps;
+ port->ldps = port_msg->legacy_device_plug_status;
+ port->dpcd_rev = port_msg->dpcd_revision;
+ port->num_sdp_streams = port_msg->num_sdp_streams;
+ port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
+ memcpy(port->guid, port_msg->peer_guid, 16);
+
+ /*
+ * manage mstb port lists with mgr lock - take a
+ * reference for this list
+ */
+ if (created) {
+ mutex_lock(&mstb->mgr->lock);
+ kref_get(&port->kref);
+ list_add(&port->next, &mstb->ports);
+ mutex_unlock(&mstb->mgr->lock);
+ }
+
+ if (old_ddps != port->ddps) {
+ if (port->ddps) {
+ drm_dp_check_port_guid(mstb, port);
+ if (!port->input)
+ drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
+ } else {
+ port->guid_valid = false;
+ port->available_pbn = 0;
+ }
+ }
+
+ if (old_pdt != port->pdt && !port->input) {
+ drm_dp_port_teardown_pdt(port, old_pdt);
+
+ ret = drm_dp_port_setup_pdt(port);
+ if (ret == true) {
+ drm_dp_send_link_address(mstb->mgr, port->mstb);
+ port->mstb->link_address_sent = true;
+ }
+ }
+
+ if (created && !port->input) {
+ char proppath[255];
+ build_mst_prop_path(port, mstb, proppath);
+ port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+ }
+
+ /* put reference to this port */
+ drm_dp_put_port(port);
+}
+
+static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_connection_status_notify *conn_stat)
+{
+ struct drm_dp_mst_port *port;
+ int old_pdt;
+ int old_ddps;
+ bool dowork = false;
+ port = drm_dp_get_port(mstb, conn_stat->port_number);
+ if (!port)
+ return;
+
+ old_ddps = port->ddps;
+ old_pdt = port->pdt;
+ port->pdt = conn_stat->peer_device_type;
+ port->mcs = conn_stat->message_capability_status;
+ port->ldps = conn_stat->legacy_device_plug_status;
+ port->ddps = conn_stat->displayport_device_plug_status;
+
+ if (old_ddps != port->ddps) {
+ if (port->ddps) {
+ drm_dp_check_port_guid(mstb, port);
+ dowork = true;
+ } else {
+ port->guid_valid = false;
+ port->available_pbn = 0;
+ }
+ }
+ if (old_pdt != port->pdt && !port->input) {
+ drm_dp_port_teardown_pdt(port, old_pdt);
+
+ if (drm_dp_port_setup_pdt(port))
+ dowork = true;
+ }
+
+ drm_dp_put_port(port);
+ if (dowork)
+ queue_work(system_long_wq, &mstb->mgr->work);
+
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
+ u8 lct, u8 *rad)
+{
+ struct drm_dp_mst_branch *mstb;
+ struct drm_dp_mst_port *port;
+ int i;
+ /* find the port by iterating down */
+ mstb = mgr->mst_primary;
+
+ for (i = 0; i < lct - 1; i++) {
+ int shift = (i % 2) ? 0 : 4;
+ int port_num = (rad[i / 2] >> shift) & 0xf;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->port_num == port_num) {
+ if (!port->mstb) {
+ DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
+ return NULL;
+ }
+
+ mstb = port->mstb;
+ break;
+ }
+ }
+ }
+ kref_get(&mstb->kref);
+ return mstb;
+}
+
+static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_port *port;
+
+ if (!mstb->link_address_sent) {
+ drm_dp_send_link_address(mgr, mstb);
+ mstb->link_address_sent = true;
+ }
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->input)
+ continue;
+
+ if (!port->ddps)
+ continue;
+
+ if (!port->available_pbn)
+ drm_dp_send_enum_path_resources(mgr, mstb, port);
+
+ if (port->mstb)
+ drm_dp_check_and_send_link_address(mgr, port->mstb);
+ }
+}
+
+static void drm_dp_mst_link_probe_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
+
+ drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
+
+}
+
+static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
+ u8 *guid)
+{
+ static u8 zero_guid[16];
+
+ if (!memcmp(guid, zero_guid, 16)) {
+ u64 salt = get_jiffies_64();
+ memcpy(&guid[0], &salt, sizeof(u64));
+ memcpy(&guid[8], &salt, sizeof(u64));
+ return false;
+ }
+ return true;
+}
+
+#if 0
+static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_REMOTE_DPCD_READ;
+ req.u.dpcd_read.port_number = port_num;
+ req.u.dpcd_read.dpcd_address = offset;
+ req.u.dpcd_read.num_bytes = num_bytes;
+ drm_dp_encode_sideband_req(&req, msg);
+
+ return 0;
+}
+#endif
+
+static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
+ bool up, u8 *msg, int len)
+{
+ int ret;
+ int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
+ int tosend, total, offset;
+ int retries = 0;
+
+retry:
+ total = len;
+ offset = 0;
+ do {
+ tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
+
+ ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
+ &msg[offset],
+ tosend);
+ if (ret != tosend) {
+ if (ret == -EIO && retries < 5) {
+ retries++;
+ goto retry;
+ }
+ DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
+ WARN(1, "fail\n");
+
+ return -EIO;
+ }
+ offset += tosend;
+ total -= tosend;
+ } while (total > 0);
+ return 0;
+}
+
+static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+ struct drm_dp_sideband_msg_tx *txmsg)
+{
+ struct drm_dp_mst_branch *mstb = txmsg->dst;
+
+ /* both msg slots are full */
+ if (txmsg->seqno == -1) {
+ if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
+ DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
+ return -EAGAIN;
+ }
+ if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
+ txmsg->seqno = mstb->last_seqno;
+ mstb->last_seqno ^= 1;
+ } else if (mstb->tx_slots[0] == NULL)
+ txmsg->seqno = 0;
+ else
+ txmsg->seqno = 1;
+ mstb->tx_slots[txmsg->seqno] = txmsg;
+ }
+ hdr->broadcast = 0;
+ hdr->path_msg = txmsg->path_msg;
+ hdr->lct = mstb->lct;
+ hdr->lcr = mstb->lct - 1;
+ if (mstb->lct > 1)
+ memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
+ hdr->seqno = txmsg->seqno;
+ return 0;
+}
+/*
+ * process a single block of the next message in the sideband queue
+ */
+static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_tx *txmsg,
+ bool up)
+{
+ u8 chunk[48];
+ struct drm_dp_sideband_msg_hdr hdr;
+ int len, space, idx, tosend;
+ int ret;
+
+ memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
+
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
+ txmsg->seqno = -1;
+ txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
+ }
+
+ /*
+ * make hdr from dst mst - for replies use the existing
+ * seqno, otherwise assign one
+ */
+ ret = set_hdr_from_dst_qlock(&hdr, txmsg);
+ if (ret < 0)
+ return ret;
+
+ /* amount left to send in this message */
+ len = txmsg->cur_len - txmsg->cur_offset;
+
+ /* 48 is the max sideband msg size; subtract 1 byte for the data CRC and the header bytes */
+ space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
+
+ tosend = min(len, space);
+ if (len == txmsg->cur_len)
+ hdr.somt = 1;
+ if (space >= len)
+ hdr.eomt = 1;
+
+
+ hdr.msg_len = tosend + 1;
+ drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
+ memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
+ /* add crc at end */
+ drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
+ idx += tosend + 1;
+
+ ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
+ if (ret) {
+ DRM_DEBUG_KMS("sideband msg failed to send\n");
+ return ret;
+ }
+
+ txmsg->cur_offset += tosend;
+ if (txmsg->cur_offset == txmsg->cur_len) {
+ txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
+ return 1;
+ }
+ return 0;
+}
+
+/* must be called holding qlock */
+static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ /* construct a chunk from the first msg in the tx_msg queue */
+ if (list_empty(&mgr->tx_msg_downq)) {
+ mgr->tx_down_in_progress = false;
+ return;
+ }
+ mgr->tx_down_in_progress = true;
+
+ txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
+ ret = process_single_tx_qlock(mgr, txmsg, false);
+ if (ret == 1) {
+ /* txmsg is sent; it should be in the tx slots now */
+ list_del(&txmsg->next);
+ } else if (ret) {
+ DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+ list_del(&txmsg->next);
+ if (txmsg->seqno != -1)
+ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ wake_up(&mgr->tx_waitq);
+ }
+ if (list_empty(&mgr->tx_msg_downq)) {
+ mgr->tx_down_in_progress = false;
+ return;
+ }
+}
+
+/* called holding qlock */
+static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ /* construct a chunk from the first msg in the tx_msg queue */
+ if (list_empty(&mgr->tx_msg_upq)) {
+ mgr->tx_up_in_progress = false;
+ return;
+ }
+
+ txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
+ ret = process_single_tx_qlock(mgr, txmsg, true);
+ if (ret == 1) {
+ /* up txmsgs aren't put in slots - so free it after we send it */
+ list_del(&txmsg->next);
+ kfree(txmsg);
+ } else if (ret)
+ DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+ mgr->tx_up_in_progress = true;
+}
+
+static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_tx *txmsg)
+{
+ mutex_lock(&mgr->qlock);
+ list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
+ if (!mgr->tx_down_in_progress)
+ process_single_down_tx_qlock(mgr);
+ mutex_unlock(&mgr->qlock);
+}
+
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
+{
+ int len;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return -ENOMEM;
+
+ txmsg->dst = mstb;
+ len = build_link_address(txmsg);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+ int i;
+
+ if (txmsg->reply.reply_type == 1)
+ DRM_DEBUG_KMS("link address nak received\n");
+ else {
+ DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
+ for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
+ DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
+ txmsg->reply.u.link_addr.ports[i].input_port,
+ txmsg->reply.u.link_addr.ports[i].peer_device_type,
+ txmsg->reply.u.link_addr.ports[i].port_number,
+ txmsg->reply.u.link_addr.ports[i].dpcd_revision,
+ txmsg->reply.u.link_addr.ports[i].mcs,
+ txmsg->reply.u.link_addr.ports[i].ddps,
+ txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
+ txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
+ txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
+ }
+ for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
+ drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
+ }
+ (*mgr->cbs->hotplug)(mgr);
+ }
+ } else
+ DRM_DEBUG_KMS("link address failed %d\n", ret);
+
+ kfree(txmsg);
+ return 0;
+}
+
+static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
+{
+ int len;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return -ENOMEM;
+
+ txmsg->dst = mstb;
+ len = build_enum_path_resources(txmsg, port->port_num);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == 1)
+ DRM_DEBUG_KMS("enum path resources nak received\n");
+ else {
+ if (port->port_num != txmsg->reply.u.path_resources.port_number)
+ DRM_ERROR("got incorrect port in response\n");
+ DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
+ txmsg->reply.u.path_resources.avail_payload_bw_number);
+ port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
+ }
+ }
+
+ kfree(txmsg);
+ return 0;
+}
+
+static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int id,
+ int pbn)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+ int len, ret;
+
+ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ if (!mstb)
+ return -EINVAL;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ ret = -ENOMEM;
+ goto fail_put;
+ }
+
+ txmsg->dst = mstb;
+ len = build_allocate_payload(txmsg, port->port_num,
+ id,
+ pbn);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == 1) {
+ ret = -EINVAL;
+ } else
+ ret = 0;
+ }
+ kfree(txmsg);
+fail_put:
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
+}
+
+static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
+ int id,
+ struct drm_dp_payload *payload)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write_payload(mgr, id, payload);
+ if (ret < 0) {
+ payload->payload_state = 0;
+ return ret;
+ }
+ payload->payload_state = DP_PAYLOAD_LOCAL;
+ return 0;
+}
+
+static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int id,
+ struct drm_dp_payload *payload)
+{
+ int ret;
+ ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
+ if (ret < 0)
+ return ret;
+ payload->payload_state = DP_PAYLOAD_REMOTE;
+ return ret;
+}
+
+static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int id,
+ struct drm_dp_payload *payload)
+{
+ DRM_DEBUG_KMS("\n");
+ /* it's okay for these to fail */
+ if (port) {
+ drm_dp_payload_send_msg(mgr, port, id, 0);
+ }
+
+ drm_dp_dpcd_write_payload(mgr, id, payload);
+ payload->payload_state = 0;
+ return 0;
+}
+
+static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
+ int id,
+ struct drm_dp_payload *payload)
+{
+ payload->payload_state = 0;
+ return 0;
+}
+
+/**
+ * drm_dp_update_payload_part1() - Execute payload update part 1
+ * @mgr: manager to use.
+ *
+ * This iterates over all proposed virtual channels, and tries to
+ * allocate space in the link for them. For 0->slots transitions,
+ * this step just writes the VCPI to the MST device. For slots->0
+ * transitions, this writes the updated VCPIs and removes the
+ * remote VC payloads.
+ *
+ * After calling this, the driver should generate ACT and payload
+ * packets.
+ */
+int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int i;
+ int cur_slots = 1;
+ struct drm_dp_payload req_payload;
+ struct drm_dp_mst_port *port;
+
+ mutex_lock(&mgr->payload_lock);
+ for (i = 0; i < mgr->max_payloads; i++) {
+ /* solve the current payloads - compare to the hw ones
+ - update the hw view */
+ req_payload.start_slot = cur_slots;
+ if (mgr->proposed_vcpis[i]) {
+ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+ } else {
+ port = NULL;
+ req_payload.num_slots = 0;
+ }
+ /* work out what is required to happen with this payload */
+ if (mgr->payloads[i].start_slot != req_payload.start_slot ||
+ mgr->payloads[i].num_slots != req_payload.num_slots) {
+
+ /* need to push an update for this payload */
+ if (req_payload.num_slots) {
+ drm_dp_create_payload_step1(mgr, i + 1, &req_payload);
+ mgr->payloads[i].num_slots = req_payload.num_slots;
+ } else if (mgr->payloads[i].num_slots) {
+ mgr->payloads[i].num_slots = 0;
+ drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]);
+ req_payload.payload_state = mgr->payloads[i].payload_state;
+ } else
+ req_payload.payload_state = 0;
+
+ mgr->payloads[i].start_slot = req_payload.start_slot;
+ mgr->payloads[i].payload_state = req_payload.payload_state;
+ }
+ cur_slots += req_payload.num_slots;
+ }
+ mutex_unlock(&mgr->payload_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_update_payload_part1);
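+
+/*
+ * An illustrative driver-side sequence for the two-part payload update
+ * (a sketch only - the ACT trigger and payload programming are hardware
+ * specific and not part of this helper):
+ *
+ * drm_dp_update_payload_part1(mgr);
+ * ... program the hardware payload table and trigger ACT ...
+ * drm_dp_check_act_status(mgr);
+ * drm_dp_update_payload_part2(mgr);
+ */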
+
+/**
+ * drm_dp_update_payload_part2() - Execute payload update part 2
+ * @mgr: manager to use.
+ *
+ * This iterates over all proposed virtual channels, and tries to
+ * allocate space in the link for them. For 0->slots transitions,
+ * this step writes the remote VC payload commands. For slots->0
+ * this just resets some internal state.
+ */
+int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_mst_port *port;
+ int i;
+ int ret = 0;
+ mutex_lock(&mgr->payload_lock);
+ for (i = 0; i < mgr->max_payloads; i++) {
+
+ if (!mgr->proposed_vcpis[i])
+ continue;
+
+ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+
+ DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
+ if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
+ ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]);
+ } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
+ ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]);
+ }
+ if (ret) {
+ mutex_unlock(&mgr->payload_lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&mgr->payload_lock);
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_update_payload_part2);
+
+#if 0 /* unused as of yet */
+static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size)
+{
+ int len;
+ struct drm_dp_sideband_msg_tx *txmsg;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return -ENOMEM;
+
+ len = build_dpcd_read(txmsg, port->port_num, 0, 8);
+ txmsg->dst = port->parent;
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ return 0;
+}
+#endif
+
+static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size, u8 *bytes)
+{
+ int len;
+ int ret;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+
+ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ if (!mstb)
+ return -EINVAL;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ ret = -ENOMEM;
+ goto fail_put;
+ }
+
+ len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
+ txmsg->dst = mstb;
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == 1) {
+ ret = -EINVAL;
+ } else
+ ret = 0;
+ }
+ kfree(txmsg);
+fail_put:
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
+}
+
+static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
+{
+ struct drm_dp_sideband_msg_reply_body reply;
+
+ reply.reply_type = 1;
+ reply.req_type = req_type;
+ drm_dp_encode_sideband_reply(&reply, msg);
+ return 0;
+}
+
+static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ int req_type, int seqno, bool broadcast)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return -ENOMEM;
+
+ txmsg->dst = mstb;
+ txmsg->seqno = seqno;
+ drm_dp_encode_up_ack_reply(txmsg, req_type);
+
+ mutex_lock(&mgr->qlock);
+ list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
+ if (!mgr->tx_up_in_progress) {
+ process_single_up_tx_qlock(mgr);
+ }
+ mutex_unlock(&mgr->qlock);
+ return 0;
+}
+
+static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
+{
+ switch (dp_link_bw) {
+ case DP_LINK_BW_1_62:
+ return 3 * dp_link_count;
+ case DP_LINK_BW_2_7:
+ return 5 * dp_link_count;
+ case DP_LINK_BW_5_4:
+ return 10 * dp_link_count;
+ }
+ return 0;
+}
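+
+/*
+ * The value returned above is the PBN one timeslot can carry. For example
+ * an HBR2 (DP_LINK_BW_5_4) link trained at 4 lanes gives 10 * 4 = 40 PBN
+ * per slot; with the total_pbn of 2560 used below that works out to 64
+ * slots.
+ */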
+
+/**
+ * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
+ * @mgr: manager to set state for
+ * @mst_state: true to enable MST on this connector - false to disable.
+ *
+ * This is called by the driver when it detects an MST capable device plugged
+ * into a DP MST capable port, or when a DP MST capable device is unplugged.
+ */
+int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
+{
+ int ret = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+
+ mutex_lock(&mgr->lock);
+ if (mst_state == mgr->mst_state)
+ goto out_unlock;
+
+ mgr->mst_state = mst_state;
+ /* set the device into MST mode */
+ if (mst_state) {
+ WARN_ON(mgr->mst_primary);
+
+ /* get dpcd info */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+ if (ret != DP_RECEIVER_CAP_SIZE) {
+ DRM_DEBUG_KMS("failed to read DPCD\n");
+ goto out_unlock;
+ }
+
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+ mgr->total_pbn = 2560;
+ mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
+ mgr->avail_slots = mgr->total_slots;
+
+ /* add initial branch device at LCT 1 */
+ mstb = drm_dp_add_mst_branch_device(1, NULL);
+ if (mstb == NULL) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ mstb->mgr = mgr;
+
+ /* give this the main reference */
+ mgr->mst_primary = mstb;
+ kref_get(&mgr->mst_primary->kref);
+
+ {
+ struct drm_dp_payload reset_pay;
+ reset_pay.start_slot = 0;
+ reset_pay.num_slots = 0x3f;
+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
+ }
+
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ goto out_unlock;
+ }
+
+
+ /* sort out guid */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
+ if (ret != 16) {
+ DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
+ goto out_unlock;
+ }
+
+ mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
+ if (!mgr->guid_valid) {
+ ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
+ mgr->guid_valid = true;
+ }
+
+ queue_work(system_long_wq, &mgr->work);
+
+ ret = 0;
+ } else {
+ /* disable MST on the device */
+ mstb = mgr->mst_primary;
+ mgr->mst_primary = NULL;
+ /* this can fail if the device is gone */
+ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ ret = 0;
+ memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+ mgr->payload_mask = 0;
+ set_bit(0, &mgr->payload_mask);
+ }
+
+out_unlock:
+ mutex_unlock(&mgr->lock);
+ if (mstb)
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
+
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
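+
+/*
+ * A minimal sketch of how a driver might flip MST state from its detect
+ * path; sink_supports_mst() is a hypothetical driver helper, typically a
+ * check of DP_MSTM_CAP in the sink's DPCD:
+ *
+ * if (sink_supports_mst(...))
+ * drm_dp_mst_topology_mgr_set_mst(mgr, true);
+ * else
+ * drm_dp_mst_topology_mgr_set_mst(mgr, false);
+ */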
+
+/**
+ * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
+ * @mgr: manager to suspend
+ *
+ * This function tells the MST device that we can't handle UP messages
+ * anymore. This should stop it from sending any since we are suspended.
+ */
+void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->lock);
+ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UPSTREAM_IS_SRC);
+ mutex_unlock(&mgr->lock);
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
+
+/**
+ * drm_dp_mst_topology_mgr_resume() - resume the MST manager
+ * @mgr: manager to resume
+ *
+ * This will fetch DPCD and see if the device is still there,
+ * if it is, it will rewrite the MSTM control bits, and return.
+ *
+ * If the device has gone away, this returns -1 and the driver should
+ * do a full MST reprobe, in case we were undocked.
+ */
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret = 0;
+
+ mutex_lock(&mgr->lock);
+
+ if (mgr->mst_primary) {
+ int sret;
+ sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+ if (sret != DP_RECEIVER_CAP_SIZE) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+ ret = 0;
+ } else
+ ret = -1;
+
+out_unlock:
+ mutex_unlock(&mgr->lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
+
+static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+{
+ int len;
+ u8 replyblock[32];
+ int replylen, origlen, curreply;
+ int ret;
+ struct drm_dp_sideband_msg_rx *msg;
+ int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
+ msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+
+ len = min(mgr->max_dpcd_transaction_bytes, 16);
+ ret = drm_dp_dpcd_read(mgr->aux, basereg,
+ replyblock, len);
+ if (ret != len) {
+ DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
+ return;
+ }
+ ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
+ if (!ret) {
+ DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
+ return;
+ }
+ replylen = msg->curchunk_len + msg->curchunk_hdrlen;
+
+ origlen = replylen;
+ replylen -= len;
+ curreply = len;
+ while (replylen > 0) {
+ len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
+ ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
+ replyblock, len);
+ if (ret != len) {
+ DRM_DEBUG_KMS("failed to read a chunk\n");
+ }
+ ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
+ if (ret == false)
+ DRM_DEBUG_KMS("failed to build sideband msg\n");
+ curreply += len;
+ replylen -= len;
+ }
+}
+
+static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret = 0;
+
+ drm_dp_get_one_sb_msg(mgr, false);
+
+ if (mgr->down_rep_recv.have_eomt) {
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+ int slot = -1;
+ mstb = drm_dp_get_mst_branch_device(mgr,
+ mgr->down_rep_recv.initial_hdr.lct,
+ mgr->down_rep_recv.initial_hdr.rad);
+
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
+ /* find the message */
+ slot = mgr->down_rep_recv.initial_hdr.seqno;
+ mutex_lock(&mgr->qlock);
+ txmsg = mstb->tx_slots[slot];
+ /* remove from slots */
+ mutex_unlock(&mgr->qlock);
+
+ if (!txmsg) {
+ DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
+ mstb,
+ mgr->down_rep_recv.initial_hdr.seqno,
+ mgr->down_rep_recv.initial_hdr.lct,
+ mgr->down_rep_recv.initial_hdr.rad[0],
+ mgr->down_rep_recv.msg[0]);
+ drm_dp_put_mst_branch_device(mstb);
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
+ drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
+ if (txmsg->reply.reply_type == 1) {
+ DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
+ }
+
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ drm_dp_put_mst_branch_device(mstb);
+
+ mutex_lock(&mgr->qlock);
+ txmsg->state = DRM_DP_SIDEBAND_TX_RX;
+ mstb->tx_slots[slot] = NULL;
+ mutex_unlock(&mgr->qlock);
+
+ wake_up(&mgr->tx_waitq);
+ }
+ return ret;
+}
+
+static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret = 0;
+ drm_dp_get_one_sb_msg(mgr, true);
+
+ if (mgr->up_req_recv.have_eomt) {
+ struct drm_dp_sideband_msg_req_body msg;
+ struct drm_dp_mst_branch *mstb;
+ bool seqno;
+ mstb = drm_dp_get_mst_branch_device(mgr,
+ mgr->up_req_recv.initial_hdr.lct,
+ mgr->up_req_recv.initial_hdr.rad);
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
+ seqno = mgr->up_req_recv.initial_hdr.seqno;
+ drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+
+ if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+ drm_dp_update_port(mstb, &msg.u.conn_stat);
+ DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
+ (*mgr->cbs->hotplug)(mgr);
+
+ } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+ drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+ DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
+ }
+
+ drm_dp_put_mst_branch_device(mstb);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ }
+ return ret;
+}
+
+/**
+ * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
+ * @mgr: manager to notify irq for.
+ * @esi: 4 bytes from SINK_COUNT_ESI
+ *
+ * This should be called from the driver when it detects a short IRQ,
+ * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
+ * topology manager will process the sideband messages received as a result
+ * of this.
+ */
+int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
+{
+ int ret = 0;
+ int sc;
+ *handled = false;
+ sc = esi[0] & 0x3f;
+
+ if (sc != mgr->sink_count) {
+ mgr->sink_count = sc;
+ *handled = true;
+ }
+
+ if (esi[1] & DP_DOWN_REP_MSG_RDY) {
+ ret = drm_dp_mst_handle_down_rep(mgr);
+ *handled = true;
+ }
+
+ if (esi[1] & DP_UP_REQ_MSG_RDY) {
+ ret |= drm_dp_mst_handle_up_req(mgr);
+ *handled = true;
+ }
+
+ drm_dp_mst_kick_tx(mgr);
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
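+
+/*
+ * A sketch of the short-pulse handler this is meant to be called from
+ * (the 14-byte ESI read and the ack write layout are assumptions modeled
+ * on typical driver usage, not mandated by this helper):
+ *
+ * u8 esi[16];
+ * bool handled;
+ *
+ * drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 14);
+ * drm_dp_mst_hpd_irq(mgr, esi, &handled);
+ * if (handled)
+ * drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
+ */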
+
+/**
+ * drm_dp_mst_detect_port() - get connection status for an MST port
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port
+ *
+ * This returns the current connection state for a port. It validates the
+ * port pointer still exists so the caller doesn't require a reference
+ */
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ enum drm_connector_status status = connector_status_disconnected;
+
+ /* we need to search for the port in the mgr in case it's gone */
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return connector_status_disconnected;
+
+ if (!port->ddps)
+ goto out;
+
+ switch (port->pdt) {
+ case DP_PEER_DEVICE_NONE:
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ break;
+
+ case DP_PEER_DEVICE_SST_SINK:
+ status = connector_status_connected;
+ break;
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ if (port->ldps)
+ status = connector_status_connected;
+ break;
+ }
+out:
+ drm_dp_put_port(port);
+ return status;
+}
+EXPORT_SYMBOL(drm_dp_mst_detect_port);
+
+/**
+ * drm_dp_mst_get_edid() - get EDID for an MST port
+ * @connector: toplevel connector to get EDID for
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port.
+ *
+ * This returns an EDID for the port connected to a connector.
+ * It validates that the pointer still exists so the caller doesn't require a
+ * reference.
+ */
+struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ struct edid *edid = NULL;
+
+ /* we need to search for the port in the mgr in case it's gone */
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return NULL;
+
+ edid = drm_get_edid(connector, &port->aux.ddc);
+ drm_dp_put_port(port);
+ return edid;
+}
+EXPORT_SYMBOL(drm_dp_mst_get_edid);
+
+/**
+ * drm_dp_find_vcpi_slots() - find slots for this PBN value
+ * @mgr: manager to use
+ * @pbn: payload bandwidth to convert into slots.
+ */
+int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
+ int pbn)
+{
+ int num_slots;
+
+ num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+
+ if (num_slots > mgr->avail_slots)
+ return -ENOSPC;
+ return num_slots;
+}
+EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
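+
+/*
+ * For example, with a pbn_div of 40 (HBR2 x4) a stream needing 2520 PBN
+ * occupies DIV_ROUND_UP(2520, 40) = 63 slots.
+ */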
+
+static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_vcpi *vcpi, int pbn)
+{
+ int num_slots;
+ int ret;
+
+ num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+
+ if (num_slots > mgr->avail_slots)
+ return -ENOSPC;
+
+ vcpi->pbn = pbn;
+ vcpi->aligned_pbn = num_slots * mgr->pbn_div;
+ vcpi->num_slots = num_slots;
+
+ ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
+ * @mgr: manager for this port
+ * @port: port to allocate a virtual channel for.
+ * @pbn: payload bandwidth number to request
+ * @slots: returned number of slots for this PBN.
+ */
+bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
+{
+ int ret;
+
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return false;
+
+ if (port->vcpi.vcpi > 0) {
+ DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
+ if (pbn == port->vcpi.pbn) {
+ *slots = port->vcpi.num_slots;
+ drm_dp_put_port(port);
+ return true;
+ }
+ }
+
+ ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
+ goto out;
+ }
+ DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
+ *slots = port->vcpi.num_slots;
+
+ drm_dp_put_port(port);
+ return true;
+out:
+ drm_dp_put_port(port);
+ return false;
+}
+EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
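+
+/*
+ * Putting the PBN helpers together - an illustrative modeset-time sequence
+ * (local names are hypothetical and error handling is trimmed):
+ *
+ * int pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+ * int slots;
+ *
+ * if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
+ * return false;
+ *
+ * then program the returned slot count into the hardware and run the
+ * payload update helpers above.
+ */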
+
+/**
+ * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port.
+ *
+ * This just resets the number of slots for the port's VCPI for later programming.
+ */
+void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return;
+ port->vcpi.num_slots = 0;
+ drm_dp_put_port(port);
+}
+EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
+
+/**
+ * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
+ * @mgr: manager for this port
+ * @port: unverified port to deallocate vcpi for
+ */
+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return;
+
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ port->vcpi.num_slots = 0;
+ port->vcpi.pbn = 0;
+ port->vcpi.aligned_pbn = 0;
+ port->vcpi.vcpi = 0;
+ drm_dp_put_port(port);
+}
+EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
+
+static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
+ int id, struct drm_dp_payload *payload)
+{
+ u8 payload_alloc[3], status;
+ int ret;
+ int retries = 0;
+
+ drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ DP_PAYLOAD_TABLE_UPDATED);
+
+ payload_alloc[0] = id;
+ payload_alloc[1] = payload->start_slot;
+ payload_alloc[2] = payload->num_slots;
+
+ ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret != 3) {
+ DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
+ goto fail;
+ }
+
+retry:
+ ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
+ goto fail;
+ }
+
+ if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
+ retries++;
+ if (retries < 20) {
+ usleep_range(10000, 20000);
+ goto retry;
+ }
+ DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ ret = 0;
+fail:
+ return ret;
+}
+
+/**
+ * drm_dp_check_act_status() - Check ACT handled status.
+ * @mgr: manager to use
+ *
+ * Check the payload status bits in the DPCD for ACT handled completion.
+ */
+int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+ u8 status;
+ int ret;
+ int count = 0;
+
+ do {
+ ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
+ goto fail;
+ }
+
+ if (status & DP_PAYLOAD_ACT_HANDLED)
+ break;
+ count++;
+ udelay(100);
+
+ } while (count < 30);
+
+ if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
+ DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
+ ret = -EINVAL;
+ goto fail;
+ }
+ return 0;
+fail:
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_check_act_status);
+
+/**
+ * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
+ * @clock: dot clock for the mode
+ * @bpp: bpp for the mode.
+ *
+ * This uses the formula in the spec to calculate the PBN value for a mode.
+ */
+int drm_dp_calc_pbn_mode(int clock, int bpp)
+{
+ fixed20_12 pix_bw;
+ fixed20_12 fbpp;
+ fixed20_12 result;
+ fixed20_12 margin, tmp;
+ u32 res;
+
+ pix_bw.full = dfixed_const(clock);
+ fbpp.full = dfixed_const(bpp);
+ tmp.full = dfixed_const(8);
+ fbpp.full = dfixed_div(fbpp, tmp);
+
+ result.full = dfixed_mul(pix_bw, fbpp);
+ margin.full = dfixed_const(54);
+ tmp.full = dfixed_const(64);
+ margin.full = dfixed_div(margin, tmp);
+ result.full = dfixed_div(result, margin);
+
+ margin.full = dfixed_const(1006);
+ tmp.full = dfixed_const(1000);
+ margin.full = dfixed_div(margin, tmp);
+ result.full = dfixed_mul(result, margin);
+
+ result.full = dfixed_div(result, tmp);
+ result.full = dfixed_ceil(result);
+ res = dfixed_trunc(result);
+ return res;
+}
+EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
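Unwinding the fixed-point steps, this computes PBN = ceil(clock_kHz * (bpp / 8) * (64 / 54) * 1.006 / 1000); the 54/64 factor accounts for MTP slot overhead and 1.006 is the 0.6% spread-spectrum margin from the spec. Worked through for the first self-test vector below:

/*
 * clock = 154000 kHz, bpp = 30:
 *   154000 * (30 / 8)   =  577500
 *   577500 * (64 / 54)  ~= 684444.4
 *   684444.4 * 1.006    ~= 688551.1
 *   688551.1 / 1000     ~= 688.6   -> ceil -> 689 PBN
 */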
+
+static int test_calc_pbn_mode(void)
+{
+ int ret;
+ ret = drm_dp_calc_pbn_mode(154000, 30);
+ if (ret != 689)
+ return -EINVAL;
+ ret = drm_dp_calc_pbn_mode(234000, 30);
+ if (ret != 1047)
+ return -EINVAL;
+ return 0;
+}
+
+/* we want to kick the TX after we've acked the up/down IRQs. */
+static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
+{
+ queue_work(system_long_wq, &mgr->tx_work);
+}
+
+static void drm_dp_mst_dump_mstb(struct seq_file *m,
+ struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_port *port;
+ int tabs = mstb->lct;
+ char prefix[10];
+ int i;
+
+ /* lct can reach 15; clamp so the prefix buffer is never overrun */
+ if (tabs > 9)
+ tabs = 9;
+ for (i = 0; i < tabs; i++)
+ prefix[i] = '\t';
+ prefix[i] = '\0';
+
+ seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
+ list_for_each_entry(port, &mstb->ports, next) {
+ seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
+ if (port->mstb)
+ drm_dp_mst_dump_mstb(m, port->mstb);
+ }
+}
+
+static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
+ char *buf)
+{
+ int ret;
+ int i;
+ for (i = 0; i < 4; i++) {
+ ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
+ if (ret != 16)
+ break;
+ }
+ if (i == 4)
+ return true;
+ return false;
+}
+
+/**
+ * drm_dp_mst_dump_topology() - dump topology to seq file.
+ * @m: seq_file to dump output to
+ * @mgr: manager to dump current topology for.
+ *
+ * Helper to dump the MST topology to a seq file for debugfs.
+ */
+void drm_dp_mst_dump_topology(struct seq_file *m,
+ struct drm_dp_mst_topology_mgr *mgr)
+{
+ int i;
+ struct drm_dp_mst_port *port;
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary)
+ drm_dp_mst_dump_mstb(m, mgr->mst_primary);
+
+ /* dump VCPIs */
+ mutex_unlock(&mgr->lock);
+
+ mutex_lock(&mgr->payload_lock);
+ seq_printf(m, "vcpi: %lx\n", mgr->payload_mask);
+
+ for (i = 0; i < mgr->max_payloads; i++) {
+ if (mgr->proposed_vcpis[i]) {
+ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
+ } else {
+ seq_printf(m, "vcpi %d: unused\n", i);
+ }
+ }
+ for (i = 0; i < mgr->max_payloads; i++) {
+ seq_printf(m, "payload %d: %d, %d, %d\n",
+ i,
+ mgr->payloads[i].payload_state,
+ mgr->payloads[i].start_slot,
+ mgr->payloads[i].num_slots);
+ }
+ mutex_unlock(&mgr->payload_lock);
+
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary) {
+ u8 buf[64];
+ bool bret;
+ int ret;
+ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
+ seq_printf(m, "dpcd: ");
+ for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
+ seq_printf(m, "%02x ", buf[i]);
+ seq_printf(m, "\n");
+ ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
+ seq_printf(m, "faux/mst: ");
+ for (i = 0; i < 2; i++)
+ seq_printf(m, "%02x ", buf[i]);
+ seq_printf(m, "\n");
+ ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ seq_printf(m, "mst ctrl: ");
+ for (i = 0; i < 1; i++)
+ seq_printf(m, "%02x ", buf[i]);
+ seq_printf(m, "\n");
+
+ bret = dump_dp_payload_table(mgr, buf);
+ if (bret) {
+ seq_printf(m, "payload table: ");
+ for (i = 0; i < 63; i++)
+ seq_printf(m, "%02x ", buf[i]);
+ seq_printf(m, "\n");
+ }
+
+ }
+
+ mutex_unlock(&mgr->lock);
+
+}
+EXPORT_SYMBOL(drm_dp_mst_dump_topology);
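Drivers typically wire this up as a debugfs seq_file show callback; a sketch, assuming the manager pointer was stashed in the file's private data at creation time:

static int my_mst_topology_show(struct seq_file *m, void *unused)
{
	struct drm_dp_mst_topology_mgr *mgr = m->private;

	drm_dp_mst_dump_topology(m, mgr);
	return 0;
}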
+
+static void drm_dp_tx_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
+
+ mutex_lock(&mgr->qlock);
+ if (mgr->tx_down_in_progress)
+ process_single_down_tx_qlock(mgr);
+ mutex_unlock(&mgr->qlock);
+}
+
+/**
+ * drm_dp_mst_topology_mgr_init - initialise a topology manager
+ * @mgr: manager struct to initialise
+ * @dev: device providing this structure - for i2c addition.
+ * @aux: DP helper aux channel to talk to this device
+ * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
+ * @max_payloads: maximum number of payloads this GPU can source
+ * @conn_base_id: the connector object ID the MST device is connected to.
+ *
+ * Return 0 for success, or negative error code on failure
+ */
+int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ struct device *dev, struct drm_dp_aux *aux,
+ int max_dpcd_transaction_bytes,
+ int max_payloads, int conn_base_id)
+{
+ mutex_init(&mgr->lock);
+ mutex_init(&mgr->qlock);
+ mutex_init(&mgr->payload_lock);
+ INIT_LIST_HEAD(&mgr->tx_msg_upq);
+ INIT_LIST_HEAD(&mgr->tx_msg_downq);
+ INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
+ INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
+ init_waitqueue_head(&mgr->tx_waitq);
+ mgr->dev = dev;
+ mgr->aux = aux;
+ mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
+ mgr->max_payloads = max_payloads;
+ mgr->conn_base_id = conn_base_id;
+ mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
+ if (!mgr->payloads)
+ return -ENOMEM;
+ mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
+ if (!mgr->proposed_vcpis)
+ return -ENOMEM;
+ set_bit(0, &mgr->payload_mask);
+ test_calc_pbn_mode();
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
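A sketch of embedding and initialising the manager from a driver's setup code; the transaction size and payload count here are illustrative assumptions, not requirements of the API:

struct my_dp_device {
	struct drm_dp_aux aux;
	struct drm_dp_mst_topology_mgr mst_mgr;
};

static int my_mst_init(struct my_dp_device *dp, struct drm_device *dev,
		       int conn_id)
{
	/* assume 16-byte DPCD transactions and up to 8 payloads */
	return drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dev->dev, &dp->aux,
					    16, 8, conn_id);
}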
+
+/**
+ * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
+ * @mgr: manager to destroy
+ */
+void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->payload_lock);
+ kfree(mgr->payloads);
+ mgr->payloads = NULL;
+ kfree(mgr->proposed_vcpis);
+ mgr->proposed_vcpis = NULL;
+ mutex_unlock(&mgr->payload_lock);
+ mgr->dev = NULL;
+ mgr->aux = NULL;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
+
+/* I2C device */
+static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ int num)
+{
+ struct drm_dp_aux *aux = adapter->algo_data;
+ struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
+ struct drm_dp_mst_branch *mstb;
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ unsigned int i;
+ bool reading = false;
+ struct drm_dp_sideband_msg_req_body msg;
+ struct drm_dp_sideband_msg_tx *txmsg = NULL;
+ int ret;
+
+ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ if (!mstb)
+ return -EREMOTEIO;
+
+ /* construct i2c msg */
+ /* see if last msg is a read */
+ if (msgs[num - 1].flags & I2C_M_RD)
+ reading = true;
+
+ if (!reading) {
+ DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ msg.req_type = DP_REMOTE_I2C_READ;
+ msg.u.i2c_read.num_transactions = num - 1;
+ msg.u.i2c_read.port_number = port->port_num;
+ for (i = 0; i < num - 1; i++) {
+ msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
+ msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
+ msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
+ }
+ msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
+ msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ txmsg->dst = mstb;
+ drm_dp_encode_sideband_req(&msg, txmsg);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+
+ if (txmsg->reply.reply_type == 1) { /* got a NAK back */
+ ret = -EREMOTEIO;
+ goto out;
+ }
+ if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
+ ret = -EIO;
+ goto out;
+ }
+ memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
+ ret = num;
+ }
+out:
+ kfree(txmsg);
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
+}
+
+static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
+ .functionality = drm_dp_mst_i2c_functionality,
+ .master_xfer = drm_dp_mst_i2c_xfer,
+};
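Since the transfer is encoded as a DP_REMOTE_I2C_READ, only transactions whose final message is a read can be carried, as drm_dp_mst_i2c_xfer() enforces above. A typical EDID block fetch fits that shape; a sketch:

static int my_read_edid_block(struct drm_dp_mst_port *port, u8 *buf)
{
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf },
	};

	/* the trailing read maps onto the remote i2c read transaction */
	return i2c_transfer(&port->aux.ddc, msgs, 2) == 2 ? 0 : -EIO;
}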
+
+/**
+ * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
+{
+ aux->ddc.algo = &drm_dp_mst_i2c_algo;
+ aux->ddc.algo_data = aux;
+ aux->ddc.retries = 3;
+
+ aux->ddc.class = I2C_CLASS_DDC;
+ aux->ddc.owner = THIS_MODULE;
+ aux->ddc.dev.parent = aux->dev;
+ aux->ddc.dev.of_node = aux->dev->of_node;
+
+ strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
+ sizeof(aux->ddc.name));
+
+ return i2c_add_adapter(&aux->ddc);
+}
+
+/**
+ * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
+ * @aux: DisplayPort AUX channel
+ */
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
+{
+ i2c_del_adapter(&aux->ddc);
+}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8218078b6133..92bc6b1d9646 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -1,31 +1,11 @@
-/**
- * \file drm_drv.c
- * Generic driver template
- *
- * \author Rickard E. (Rik) Faith <[email protected]>
- * \author Gareth Hughes <[email protected]>
- *
- * To use this template, you must at least define the following (samples
- * given for the MGA driver):
- *
- * \code
- * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
- *
- * #define DRIVER_NAME "mga"
- * #define DRIVER_DESC "Matrox G200/G400"
- * #define DRIVER_DATE "20001127"
- *
- * #define drm_x mga_##x
- * \endcode
- */
-
/*
- * Created: Thu Nov 23 03:10:50 2000 by [email protected]
+ * Created: Fri Jan 19 10:48:35 2001 by [email protected]
*
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
+ * Author Rickard E. (Rik) Faith <[email protected]>
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -40,432 +20,902 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mount.h>
#include <linux/slab.h>
-#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
+#include "drm_legacy.h"
+unsigned int drm_debug = 0; /* 1 to enable debug output */
+EXPORT_SYMBOL(drm_debug);
-static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
-
-/** Ioctl table */
-static const struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
-#if __OS_HAS_AGP
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-#endif
-
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
-
- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-};
+unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
-#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
+unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
-/** File operations structure */
-static const struct file_operations drm_stub_fops = {
- .owner = THIS_MODULE,
- .open = drm_stub_open,
- .llseek = noop_llseek,
-};
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
-static int __init drm_core_init(void)
+MODULE_AUTHOR(CORE_AUTHOR);
+MODULE_DESCRIPTION(CORE_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
+
+module_param_named(debug, drm_debug, int, 0600);
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
+
+static DEFINE_SPINLOCK(drm_minor_lock);
+static struct idr drm_minors_idr;
+
+struct class *drm_class;
+static struct dentry *drm_debugfs_root;
+
+int drm_err(const char *func, const char *format, ...)
{
- int ret = -ENOMEM;
+ struct va_format vaf;
+ va_list args;
+ int r;
- drm_global_init();
- drm_connector_ida_init();
- idr_init(&drm_minors_idr);
+ va_start(args, format);
- if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
- goto err_p1;
+ vaf.fmt = format;
+ vaf.va = &args;
- drm_class = drm_sysfs_create(THIS_MODULE, "drm");
- if (IS_ERR(drm_class)) {
- printk(KERN_ERR "DRM: Error creating drm class.\n");
- ret = PTR_ERR(drm_class);
- goto err_p2;
+ r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(drm_err);
+
+void drm_ut_debug_printk(const char *function_name, const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(drm_ut_debug_printk);
+
+struct drm_master *drm_master_create(struct drm_minor *minor)
+{
+ struct drm_master *master;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return NULL;
+
+ kref_init(&master->refcount);
+ spin_lock_init(&master->lock.spinlock);
+ init_waitqueue_head(&master->lock.lock_queue);
+ if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
+ kfree(master);
+ return NULL;
}
+ INIT_LIST_HEAD(&master->magicfree);
+ master->minor = minor;
- drm_debugfs_root = debugfs_create_dir("dri", NULL);
- if (!drm_debugfs_root) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
- ret = -1;
- goto err_p3;
+ return master;
+}
+
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+ kref_get(&master->refcount);
+ return master;
+}
+EXPORT_SYMBOL(drm_master_get);
+
+static void drm_master_destroy(struct kref *kref)
+{
+ struct drm_master *master = container_of(kref, struct drm_master, refcount);
+ struct drm_magic_entry *pt, *next;
+ struct drm_device *dev = master->minor->dev;
+ struct drm_map_list *r_list, *list_temp;
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev->driver->master_destroy)
+ dev->driver->master_destroy(dev, master);
+
+ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+ if (r_list->master == master) {
+ drm_rmmap_locked(dev, r_list->map);
+ r_list = NULL;
+ }
}
- DRM_INFO("Initialized %s %d.%d.%d %s\n",
- CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
- return 0;
-err_p3:
- drm_sysfs_destroy();
-err_p2:
- unregister_chrdev(DRM_MAJOR, "drm");
+ if (master->unique) {
+ kfree(master->unique);
+ master->unique = NULL;
+ master->unique_len = 0;
+ }
- idr_destroy(&drm_minors_idr);
-err_p1:
- return ret;
+ list_for_each_entry_safe(pt, next, &master->magicfree, head) {
+ list_del(&pt->head);
+ drm_ht_remove_item(&master->magiclist, &pt->hash_item);
+ kfree(pt);
+ }
+
+ drm_ht_remove(&master->magiclist);
+
+ mutex_unlock(&dev->struct_mutex);
+ kfree(master);
}
-static void __exit drm_core_exit(void)
+void drm_master_put(struct drm_master **master)
{
- debugfs_remove(drm_debugfs_root);
- drm_sysfs_destroy();
+ kref_put(&(*master)->refcount, drm_master_destroy);
+ *master = NULL;
+}
+EXPORT_SYMBOL(drm_master_put);
- unregister_chrdev(DRM_MAJOR, "drm");
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret = 0;
- drm_connector_ida_destroy();
- idr_destroy(&drm_minors_idr);
+ mutex_lock(&dev->master_mutex);
+ if (drm_is_master(file_priv))
+ goto out_unlock;
+
+ if (file_priv->minor->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!file_priv->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ file_priv->minor->master = drm_master_get(file_priv->master);
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file_priv, false);
+ if (unlikely(ret != 0))
+ drm_master_put(&file_priv->minor->master);
+ }
+
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+ return ret;
}
-module_init(drm_core_init);
-module_exit(drm_core_exit);
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret = -EINVAL;
-/**
- * Copy and IOCTL return string to user space
+ mutex_lock(&dev->master_mutex);
+ if (!drm_is_master(file_priv))
+ goto out_unlock;
+
+ if (!file_priv->minor->master)
+ goto out_unlock;
+
+ ret = 0;
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, file_priv, false);
+ drm_master_put(&file_priv->minor->master);
+
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+ return ret;
+}
+
+/*
+ * DRM Minors
+ * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
+ * of them is represented by a drm_minor object. Depending on the capabilities
+ * of the device-driver, different interfaces are registered.
+ *
+ * Minors can be accessed via dev->$minor_name. This pointer is either
+ * NULL or a valid drm_minor pointer and stays valid as long as the device is
+ * valid. This means, DRM minors have the same life-time as the underlying
+ * device. However, this doesn't mean that the minor is active. Minors are
+ * registered and unregistered dynamically according to device-state.
*/
-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+
+static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
+ unsigned int type)
{
- int len;
+ switch (type) {
+ case DRM_MINOR_LEGACY:
+ return &dev->primary;
+ case DRM_MINOR_RENDER:
+ return &dev->render;
+ case DRM_MINOR_CONTROL:
+ return &dev->control;
+ default:
+ return NULL;
+ }
+}
- /* don't overflow userbuf */
- len = strlen(value);
- if (len > *buf_len)
- len = *buf_len;
+static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor *minor;
+ unsigned long flags;
+ int r;
+
+ minor = kzalloc(sizeof(*minor), GFP_KERNEL);
+ if (!minor)
+ return -ENOMEM;
+
+ minor->type = type;
+ minor->dev = dev;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ r = idr_alloc(&drm_minors_idr,
+ NULL,
+ 64 * type,
+ 64 * (type + 1),
+ GFP_NOWAIT);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+ idr_preload_end();
+
+ if (r < 0)
+ goto err_free;
+
+ minor->index = r;
+
+ minor->kdev = drm_sysfs_minor_alloc(minor);
+ if (IS_ERR(minor->kdev)) {
+ r = PTR_ERR(minor->kdev);
+ goto err_index;
+ }
- /* let userspace know exact length of driver value (which could be
- * larger than the userspace-supplied buffer) */
- *buf_len = strlen(value);
+ *drm_minor_get_slot(dev, type) = minor;
+ return 0;
- /* finally, try filling in the userbuf */
- if (len && buf)
- if (copy_to_user(buf, value, len))
- return -EFAULT;
+err_index:
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_remove(&drm_minors_idr, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+err_free:
+ kfree(minor);
+ return r;
+}
+
+static void drm_minor_free(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor **slot, *minor;
+ unsigned long flags;
+
+ slot = drm_minor_get_slot(dev, type);
+ minor = *slot;
+ if (!minor)
+ return;
+
+ drm_mode_group_destroy(&minor->mode_group);
+ put_device(minor->kdev);
+
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_remove(&drm_minors_idr, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ kfree(minor);
+ *slot = NULL;
+}
+
+static int drm_minor_register(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor *minor;
+ unsigned long flags;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ minor = *drm_minor_get_slot(dev, type);
+ if (!minor)
+ return 0;
+
+ ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
+ if (ret) {
+ DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
+ return ret;
+ }
+
+ ret = device_add(minor->kdev);
+ if (ret)
+ goto err_debugfs;
+
+ /* replace NULL with @minor so lookups will succeed from now on */
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_replace(&drm_minors_idr, minor, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ DRM_DEBUG("new minor registered %d\n", minor->index);
return 0;
+
+err_debugfs:
+ drm_debugfs_cleanup(minor);
+ return ret;
+}
+
+static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor *minor;
+ unsigned long flags;
+
+ minor = *drm_minor_get_slot(dev, type);
+ if (!minor || !device_is_registered(minor->kdev))
+ return;
+
+ /* replace @minor with NULL so lookups will fail from now on */
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_replace(&drm_minors_idr, NULL, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ device_del(minor->kdev);
+ dev_set_drvdata(minor->kdev, NULL); /* safety belt */
+ drm_debugfs_cleanup(minor);
}
/**
- * Get version information
+ * drm_minor_acquire - Acquire a DRM minor
+ * @minor_id: Minor ID of the DRM-minor
*
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_version structure.
- * \return zero on success or negative number on failure.
+ * Looks up the given minor-ID and returns the respective DRM-minor object. The
+ * reference count of the underlying device is increased, so you must release this
+ * object with drm_minor_release().
*
- * Fills in the version information in \p arg.
+ * As long as you hold this minor, it is guaranteed that the object and the
+ * minor->dev pointer will stay valid! However, the device may get unplugged and
+ * unregistered while you hold the minor.
+ *
+ * Returns:
+ * Pointer to minor-object with increased device-refcount, or PTR_ERR on
+ * failure.
*/
-static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
- struct drm_version *version = data;
- int err;
+ struct drm_minor *minor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (minor)
+ drm_dev_ref(minor->dev);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ if (!minor) {
+ return ERR_PTR(-ENODEV);
+ } else if (drm_device_is_unplugged(minor->dev)) {
+ drm_dev_unref(minor->dev);
+ return ERR_PTR(-ENODEV);
+ }
- version->version_major = dev->driver->major;
- version->version_minor = dev->driver->minor;
- version->version_patchlevel = dev->driver->patchlevel;
- err = drm_copy_field(version->name, &version->name_len,
- dev->driver->name);
- if (!err)
- err = drm_copy_field(version->date, &version->date_len,
- dev->driver->date);
- if (!err)
- err = drm_copy_field(version->desc, &version->desc_len,
- dev->driver->desc);
+ return minor;
+}
- return err;
+/**
+ * drm_minor_release - Release DRM minor
+ * @minor: Pointer to DRM minor object
+ *
+ * Release a minor that was previously acquired via drm_minor_acquire().
+ */
+void drm_minor_release(struct drm_minor *minor)
+{
+ drm_dev_unref(minor->dev);
}
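Any lookup-by-minor should follow the same acquire/use/release pattern as the stub open path further down; a sketch, where my_use_device is a hypothetical helper:

static int my_lookup(unsigned int minor_id)
{
	struct drm_minor *minor = drm_minor_acquire(minor_id);

	if (IS_ERR(minor))
		return PTR_ERR(minor);
	/* minor->dev stays valid while the minor is held, even if unplugged */
	my_use_device(minor->dev);	/* hypothetical helper */
	drm_minor_release(minor);
	return 0;
}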
/**
- * drm_ioctl_permit - Check ioctl permissions against caller
+ * drm_put_dev - Unregister and release a DRM device
+ * @dev: DRM device
*
- * @flags: ioctl permission flags.
- * @file_priv: Pointer to struct drm_file identifying the caller.
+ * Called at module unload time or when a PCI device is unplugged.
*
- * Checks whether the caller is allowed to run an ioctl with the
- * indicated permissions. If so, returns zero. Otherwise returns an
- * error code suitable for ioctl return.
+ * Use of this function is discouraged. It will eventually go away completely.
+ * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
+ *
+ * Cleans up all DRM device, calling drm_lastclose().
*/
-static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+void drm_put_dev(struct drm_device *dev)
{
- /* ROOT_ONLY is only for CAP_SYS_ADMIN */
- if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
- return -EACCES;
-
- /* AUTH is only for authenticated or render client */
- if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
- !file_priv->authenticated))
- return -EACCES;
-
- /* MASTER is only for master or control clients */
- if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
- !drm_is_control_client(file_priv)))
- return -EACCES;
-
- /* Control clients must be explicitly allowed */
- if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
- drm_is_control_client(file_priv)))
- return -EACCES;
-
- /* Render clients must be explicitly allowed */
- if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
- drm_is_render_client(file_priv)))
- return -EACCES;
+ DRM_DEBUG("\n");
- return 0;
+ if (!dev) {
+ DRM_ERROR("cleanup called no dev\n");
+ return;
+ }
+
+ drm_dev_unregister(dev);
+ drm_dev_unref(dev);
+}
+EXPORT_SYMBOL(drm_put_dev);
+
+void drm_unplug_dev(struct drm_device *dev)
+{
+ /* for a USB device */
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+
+ mutex_lock(&drm_global_mutex);
+
+ drm_device_set_unplugged(dev);
+
+ if (dev->open_count == 0) {
+ drm_put_dev(dev);
+ }
+ mutex_unlock(&drm_global_mutex);
+}
+EXPORT_SYMBOL(drm_unplug_dev);
+
+/*
+ * DRM internal mount
+ * We want to be able to allocate our own "struct address_space" to control
+ * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
+ * stand-alone address_space objects, so we need an underlying inode. As there
+ * is no way to allocate an independent inode easily, we need a fake internal
+ * VFS mount-point.
+ *
+ * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
+ * frees it again. You are allowed to use iget() and iput() to get references to
+ * the inode. But each drm_fs_inode_new() call must be paired with exactly one
+ * drm_fs_inode_free() call (which does not have to be the last iput()).
+ * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
+ * between multiple inode-users. You could, technically, call
+ * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
+ * iput(), but this way you'd end up with a new vfsmount for each inode.
+ */
+
+static int drm_fs_cnt;
+static struct vfsmount *drm_fs_mnt;
+
+static const struct dentry_operations drm_fs_dops = {
+ .d_dname = simple_dname,
+};
+
+static const struct super_operations drm_fs_sops = {
+ .statfs = simple_statfs,
+};
+
+static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type,
+ "drm:",
+ &drm_fs_sops,
+ &drm_fs_dops,
+ 0x010203ff);
+}
+
+static struct file_system_type drm_fs_type = {
+ .name = "drm",
+ .owner = THIS_MODULE,
+ .mount = drm_fs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+static struct inode *drm_fs_inode_new(void)
+{
+ struct inode *inode;
+ int r;
+
+ r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
+ if (r < 0) {
+ DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
+ return ERR_PTR(r);
+ }
+
+ inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
+ if (IS_ERR(inode))
+ simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
+
+ return inode;
+}
+
+static void drm_fs_inode_free(struct inode *inode)
+{
+ if (inode) {
+ iput(inode);
+ simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
+ }
}
/**
- * Called whenever a process performs an ioctl on /dev/drm.
+ * drm_dev_alloc - Allocate new DRM device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * Allocate and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems.
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
+ * The initial ref-count of the object is 1. Use drm_dev_ref() and
+ * drm_dev_unref() to take and drop further ref-counts.
*
- * Looks up the ioctl function in the ::ioctls table, checking for root
- * previleges if so required, and dispatches to the respective function.
+ * RETURNS:
+ * Pointer to new DRM device, or NULL if out of memory.
*/
-long drm_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg)
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent)
{
- struct drm_file *file_priv = filp->private_data;
struct drm_device *dev;
- const struct drm_ioctl_desc *ioctl = NULL;
- drm_ioctl_t *func;
- unsigned int nr = DRM_IOCTL_NR(cmd);
- int retcode = -EINVAL;
- char stack_kdata[128];
- char *kdata = NULL;
- unsigned int usize, asize;
-
- dev = file_priv->minor->dev;
-
- if (drm_device_is_unplugged(dev))
- return -ENODEV;
-
- if ((nr >= DRM_CORE_IOCTL_COUNT) &&
- ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
- goto err_i1;
- if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
- u32 drv_size;
- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
- drv_size = _IOC_SIZE(ioctl->cmd_drv);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
- cmd = ioctl->cmd_drv;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ kref_init(&dev->ref);
+ dev->dev = parent;
+ dev->driver = driver;
+
+ INIT_LIST_HEAD(&dev->filelist);
+ INIT_LIST_HEAD(&dev->ctxlist);
+ INIT_LIST_HEAD(&dev->vmalist);
+ INIT_LIST_HEAD(&dev->maplist);
+ INIT_LIST_HEAD(&dev->vblank_event_list);
+
+ spin_lock_init(&dev->buf_lock);
+ spin_lock_init(&dev->event_lock);
+ mutex_init(&dev->struct_mutex);
+ mutex_init(&dev->ctxlist_mutex);
+ mutex_init(&dev->master_mutex);
+
+ dev->anon_inode = drm_fs_inode_new();
+ if (IS_ERR(dev->anon_inode)) {
+ ret = PTR_ERR(dev->anon_inode);
+ DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
+ goto err_free;
}
- else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
- u32 drv_size;
- ioctl = &drm_ioctls[nr];
-
- drv_size = _IOC_SIZE(ioctl->cmd);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_minors;
+ }
- cmd = ioctl->cmd;
- } else
- goto err_i1;
+ if (drm_core_check_feature(dev, DRIVER_RENDER)) {
+ ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_minors;
+ }
- DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated, ioctl->name);
+ ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_minors;
- /* Do not trust userspace, use our own definition */
- func = ioctl->func;
+ if (drm_ht_create(&dev->map_hash, 12))
+ goto err_minors;
- if (unlikely(!func)) {
- DRM_DEBUG("no function\n");
- retcode = -EINVAL;
- goto err_i1;
+ ret = drm_legacy_ctxbitmap_init(dev);
+ if (ret) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ goto err_ht;
}
- retcode = drm_ioctl_permit(ioctl->flags, file_priv);
- if (unlikely(retcode))
- goto err_i1;
-
- if (cmd & (IOC_IN | IOC_OUT)) {
- if (asize <= sizeof(stack_kdata)) {
- kdata = stack_kdata;
- } else {
- kdata = kmalloc(asize, GFP_KERNEL);
- if (!kdata) {
- retcode = -ENOMEM;
- goto err_i1;
- }
+ if (driver->driver_features & DRIVER_GEM) {
+ ret = drm_gem_init(dev);
+ if (ret) {
+ DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+ goto err_ctxbitmap;
}
- if (asize > usize)
- memset(kdata + usize, 0, asize - usize);
}
- if (cmd & IOC_IN) {
- if (copy_from_user(kdata, (void __user *)arg,
- usize) != 0) {
- retcode = -EFAULT;
- goto err_i1;
- }
- } else if (cmd & IOC_OUT) {
- memset(kdata, 0, usize);
- }
+ return dev;
+
+err_ctxbitmap:
+ drm_legacy_ctxbitmap_cleanup(dev);
+err_ht:
+ drm_ht_remove(&dev->map_hash);
+err_minors:
+ drm_minor_free(dev, DRM_MINOR_LEGACY);
+ drm_minor_free(dev, DRM_MINOR_RENDER);
+ drm_minor_free(dev, DRM_MINOR_CONTROL);
+ drm_fs_inode_free(dev->anon_inode);
+err_free:
+ mutex_destroy(&dev->master_mutex);
+ kfree(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_dev_alloc);
+
+static void drm_dev_release(struct kref *ref)
+{
+ struct drm_device *dev = container_of(ref, struct drm_device, ref);
+
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_destroy(dev);
+
+ drm_legacy_ctxbitmap_cleanup(dev);
+ drm_ht_remove(&dev->map_hash);
+ drm_fs_inode_free(dev->anon_inode);
- if (ioctl->flags & DRM_UNLOCKED)
- retcode = func(dev, kdata, file_priv);
- else {
- mutex_lock(&drm_global_mutex);
- retcode = func(dev, kdata, file_priv);
- mutex_unlock(&drm_global_mutex);
+ drm_minor_free(dev, DRM_MINOR_LEGACY);
+ drm_minor_free(dev, DRM_MINOR_RENDER);
+ drm_minor_free(dev, DRM_MINOR_CONTROL);
+
+ mutex_destroy(&dev->master_mutex);
+ kfree(dev->unique);
+ kfree(dev);
+}
+
+/**
+ * drm_dev_ref - Take reference of a DRM device
+ * @dev: device to take reference of or NULL
+ *
+ * This increases the ref-count of @dev by one. You *must* already own a
+ * reference when calling this. Use drm_dev_unref() to drop this reference
+ * again.
+ *
+ * This function never fails. However, this function does not provide *any*
+ * guarantee whether the device is alive or running. It only provides a
+ * reference to the object and the memory associated with it.
+ */
+void drm_dev_ref(struct drm_device *dev)
+{
+ if (dev)
+ kref_get(&dev->ref);
+}
+EXPORT_SYMBOL(drm_dev_ref);
+
+/**
+ * drm_dev_unref - Drop reference of a DRM device
+ * @dev: device to drop reference of or NULL
+ *
+ * This decreases the ref-count of @dev by one. The device is destroyed if the
+ * ref-count drops to zero.
+ */
+void drm_dev_unref(struct drm_device *dev)
+{
+ if (dev)
+ kref_put(&dev->ref, drm_dev_release);
+}
+EXPORT_SYMBOL(drm_dev_unref);
+
+/**
+ * drm_dev_register - Register DRM device
+ * @dev: Device to register
+ * @flags: Flags passed to the driver's .load() function
+ *
+ * Register the DRM device @dev with the system, advertise device to user-space
+ * and start normal device operation. @dev must be allocated via drm_dev_alloc()
+ * previously.
+ *
+ * Never call this twice on any device!
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_dev_register(struct drm_device *dev, unsigned long flags)
+{
+ int ret;
+
+ mutex_lock(&drm_global_mutex);
+
+ ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_minors;
+
+ ret = drm_minor_register(dev, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_minors;
+
+ ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_minors;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, flags);
+ if (ret)
+ goto err_minors;
}
- if (cmd & IOC_OUT) {
- if (copy_to_user((void __user *)arg, kdata,
- usize) != 0)
- retcode = -EFAULT;
+ /* setup grouping for legacy outputs */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_unload;
}
- err_i1:
- if (!ioctl)
- DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated, cmd, nr);
-
- if (kdata != stack_kdata)
- kfree(kdata);
- if (retcode)
- DRM_DEBUG("ret = %d\n", retcode);
- return retcode;
+ ret = 0;
+ goto out_unlock;
+
+err_unload:
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
+err_minors:
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+out_unlock:
+ mutex_unlock(&drm_global_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_dev_register);
+
+/**
+ * drm_dev_unregister - Unregister DRM device
+ * @dev: Device to unregister
+ *
+ * Unregister the DRM device from the system. This does the reverse of
+ * drm_dev_register() but does not deallocate the device. The caller must call
+ * drm_dev_unref() to drop their final reference.
+ */
+void drm_dev_unregister(struct drm_device *dev)
+{
+ struct drm_map_list *r_list, *list_temp;
+
+ drm_lastclose(dev);
+
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
+
+ if (dev->agp)
+ drm_pci_agp_destroy(dev);
+
+ drm_vblank_cleanup(dev);
+
+ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+ drm_rmmap(dev, r_list->map);
+
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
-EXPORT_SYMBOL(drm_ioctl);
+EXPORT_SYMBOL(drm_dev_unregister);
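Putting the pieces together, the intended lifecycle is alloc, register, unregister, unref; a minimal probe/remove sketch for a platform driver, where my_driver is an assumed drm_driver definition:

static int my_probe(struct platform_device *pdev)
{
	struct drm_device *drm = drm_dev_alloc(&my_driver, &pdev->dev);

	if (!drm)
		return -ENOMEM;
	if (drm_dev_register(drm, 0)) {
		drm_dev_unref(drm);
		return -ENODEV;
	}
	platform_set_drvdata(pdev, drm);
	return 0;
}

static int my_remove(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	drm_dev_unregister(drm);
	drm_dev_unref(drm);	/* drops the initial ref from drm_dev_alloc() */
	return 0;
}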
/**
- * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
+ * drm_dev_set_unique - Set the unique name of a DRM device
+ * @dev: device of which to set the unique name
+ * @fmt: format string for unique name
+ *
+ * Sets the unique name of a DRM device using the specified format string and
+ * a variable list of arguments. Drivers can use this at driver probe time if
+ * the unique name of the devices they drive is static.
*
- * @nr: Ioctl number.
- * @flags: Where to return the ioctl permission flags
+ * Return: 0 on success or a negative error code on failure.
*/
-bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
+int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
{
- if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
- (nr < DRM_COMMAND_BASE)) {
- *flags = drm_ioctls[nr].flags;
- return true;
+ va_list ap;
+
+ kfree(dev->unique);
+
+ va_start(ap, fmt);
+ dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ return dev->unique ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL(drm_dev_set_unique);
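For devices whose name is static at probe time this usually just mirrors the device name; a sketch:

/* typically called from probe, before drm_dev_register() */
if (drm_dev_set_unique(drm, "%s", dev_name(drm->dev)))
	return -ENOMEM;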
+
+/*
+ * DRM Core
+ * The DRM core module initializes all global DRM objects and makes them
+ * available to drivers. Once setup, drivers can probe their respective
+ * devices.
+ * Currently, core management includes:
+ * - The "DRM-Global" key/value database
+ * - Global ID management for connectors
+ * - DRM major number allocation
+ * - DRM minor management
+ * - DRM sysfs class
+ * - DRM debugfs root
+ *
+ * Furthermore, the DRM core provides dynamic char-dev lookups. For each
+ * interface registered on a DRM device, you can request minor numbers from DRM
+ * core. DRM core takes care of major-number management and char-dev
+ * registration. A stub ->open() callback forwards any open() requests to the
+ * registered minor.
+ */
+
+static int drm_stub_open(struct inode *inode, struct file *filp)
+{
+ const struct file_operations *new_fops;
+ struct drm_minor *minor;
+ int err;
+
+ DRM_DEBUG("\n");
+
+ mutex_lock(&drm_global_mutex);
+ minor = drm_minor_acquire(iminor(inode));
+ if (IS_ERR(minor)) {
+ err = PTR_ERR(minor);
+ goto out_unlock;
+ }
+
+ new_fops = fops_get(minor->dev->driver->fops);
+ if (!new_fops) {
+ err = -ENODEV;
+ goto out_release;
}
- return false;
+ replace_fops(filp, new_fops);
+ if (filp->f_op->open)
+ err = filp->f_op->open(inode, filp);
+ else
+ err = 0;
+
+out_release:
+ drm_minor_release(minor);
+out_unlock:
+ mutex_unlock(&drm_global_mutex);
+ return err;
}
-EXPORT_SYMBOL(drm_ioctl_flags);
+
+static const struct file_operations drm_stub_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_stub_open,
+ .llseek = noop_llseek,
+};
+
+static int __init drm_core_init(void)
+{
+ int ret = -ENOMEM;
+
+ drm_global_init();
+ drm_connector_ida_init();
+ idr_init(&drm_minors_idr);
+
+ if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
+ goto err_p1;
+
+ drm_class = drm_sysfs_create(THIS_MODULE, "drm");
+ if (IS_ERR(drm_class)) {
+ printk(KERN_ERR "DRM: Error creating drm class.\n");
+ ret = PTR_ERR(drm_class);
+ goto err_p2;
+ }
+
+ drm_debugfs_root = debugfs_create_dir("dri", NULL);
+ if (!drm_debugfs_root) {
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
+ ret = -1;
+ goto err_p3;
+ }
+
+ DRM_INFO("Initialized %s %d.%d.%d %s\n",
+ CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+ return 0;
+err_p3:
+ drm_sysfs_destroy();
+err_p2:
+ unregister_chrdev(DRM_MAJOR, "drm");
+
+ idr_destroy(&drm_minors_idr);
+err_p1:
+ return ret;
+}
+
+static void __exit drm_core_exit(void)
+{
+ debugfs_remove(drm_debugfs_root);
+ drm_sysfs_destroy();
+
+ unregister_chrdev(DRM_MAJOR, "drm");
+
+ drm_connector_ida_destroy();
+ idr_destroy(&drm_minors_idr);
+}
+
+module_init(drm_core_init);
+module_exit(drm_core_exit);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index dfa9769b26b5..1dbf3bc4c6a3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3305,6 +3305,7 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder && connector->eld[0])
@@ -3775,8 +3776,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
- /* Populate picture aspect ratio from CEA mode list */
- if (frame->video_code > 0)
+ /*
+ * Populate picture aspect ratio from either
+ * user input (if specified) or from the CEA mode list.
+ */
+ if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 ||
+ mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9)
+ frame->picture_aspect = mode->picture_aspect_ratio;
+ else if (frame->video_code > 0)
frame->picture_aspect = drm_get_cea_aspect_ratio(
frame->video_code);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index bdee6eb0b24a..3144db9dc0f1 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -119,6 +119,58 @@ fail:
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
+int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector)
+{
+ struct drm_fb_helper_connector **temp;
+ struct drm_fb_helper_connector *fb_helper_connector;
+
+ WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+ if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
+ temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ fb_helper->connector_info_alloc_count = fb_helper->connector_count + 1;
+ fb_helper->connector_info = temp;
+ }
+
+ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+ if (!fb_helper_connector)
+ return -ENOMEM;
+
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
+
+int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ struct drm_fb_helper_connector *fb_helper_connector;
+ int i, j;
+
+ WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (fb_helper->connector_info[i]->connector == connector)
+ break;
+ }
+
+ if (i == fb_helper->connector_count)
+ return -EINVAL;
+ fb_helper_connector = fb_helper->connector_info[i];
+
+ for (j = i + 1; j < fb_helper->connector_count; j++) {
+ fb_helper->connector_info[j - 1] = fb_helper->connector_info[j];
+ }
+ fb_helper->connector_count--;
+ kfree(fb_helper_connector);
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
+
static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
{
struct drm_fb_helper_connector *fb_helper_conn;
@@ -596,6 +648,7 @@ int drm_fb_helper_init(struct drm_device *dev,
kfree(fb_helper->crtc_info);
return -ENOMEM;
}
+ fb_helper->connector_info_alloc_count = dev->mode_config.num_connector;
fb_helper->connector_count = 0;
for (i = 0; i < crtc_count; i++) {
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 021fe5d11df5..4b060942cb3c 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -38,6 +38,7 @@
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include "drm_legacy.h"
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
@@ -112,55 +113,12 @@ err_undo:
EXPORT_SYMBOL(drm_open);
/**
- * File \c open operation.
- *
- * \param inode device inode.
- * \param filp file pointer.
- *
- * Puts the dev->fops corresponding to the device minor number into
- * \p filp, call the \c open method, and restore the file operations.
- */
-int drm_stub_open(struct inode *inode, struct file *filp)
-{
- struct drm_device *dev;
- struct drm_minor *minor;
- int err = -ENODEV;
- const struct file_operations *new_fops;
-
- DRM_DEBUG("\n");
-
- mutex_lock(&drm_global_mutex);
- minor = drm_minor_acquire(iminor(inode));
- if (IS_ERR(minor))
- goto out_unlock;
-
- dev = minor->dev;
- new_fops = fops_get(dev->driver->fops);
- if (!new_fops)
- goto out_release;
-
- replace_fops(filp, new_fops);
- if (filp->f_op->open)
- err = filp->f_op->open(inode, filp);
-
-out_release:
- drm_minor_release(minor);
-out_unlock:
- mutex_unlock(&drm_global_mutex);
- return err;
-}
-
-/**
* Check whether DRI will run on this CPU.
*
* \return non-zero if the DRI will run on this CPU, or zero otherwise.
*/
static int drm_cpu_valid(void)
{
-#if defined(__i386__)
- if (boot_cpu_data.x86 == 3)
- return 0; /* No cmpxchg on a 386 */
-#endif
#if defined(__sparc__) && !defined(__sparc_v9__)
return 0; /* No cmpxchg before v9 sparc. */
#endif
@@ -203,8 +161,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
priv->minor = minor;
/* for compatibility root is always authenticated */
- priv->always_authenticated = capable(CAP_SYS_ADMIN);
- priv->authenticated = priv->always_authenticated;
+ priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
@@ -237,7 +194,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
goto out_close;
}
- priv->is_master = 1;
/* take another reference for the copy in the local file priv */
priv->master = drm_master_get(priv->minor->master);
priv->authenticated = 1;
@@ -429,6 +385,10 @@ int drm_release(struct inode *inode, struct file *filp)
DRM_DEBUG("open_count = %d\n", dev->open_count);
+ mutex_lock(&dev->struct_mutex);
+ list_del(&file_priv->lhead);
+ mutex_unlock(&dev->struct_mutex);
+
if (dev->driver->preclose)
dev->driver->preclose(dev, file_priv);
@@ -461,44 +421,18 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->tag == file_priv &&
- pos->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev,
- pos->handle);
-
- drm_ctxbitmap_free(dev, pos->handle);
-
- list_del(&pos->head);
- kfree(pos);
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
+ drm_legacy_ctxbitmap_flush(dev, file_priv);
mutex_lock(&dev->master_mutex);
- if (file_priv->is_master) {
+ if (drm_is_master(file_priv)) {
struct drm_master *master = file_priv->master;
- struct drm_file *temp;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(temp, &dev->filelist, lhead) {
- if ((temp->master == file_priv->master) &&
- (temp != file_priv))
- temp->authenticated = temp->always_authenticated;
- }
/**
* Since the master is disappearing, so is the
* possibility to lock.
*/
-
+ mutex_lock(&dev->struct_mutex);
if (master->lock.hw_lock) {
if (dev->sigdata.lock == master->lock.hw_lock)
dev->sigdata.lock = NULL;
@@ -519,13 +453,8 @@ int drm_release(struct inode *inode, struct file *filp)
/* drop the master reference held by the file priv */
if (file_priv->master)
drm_master_put(&file_priv->master);
- file_priv->is_master = 0;
mutex_unlock(&dev->master_mutex);
- mutex_lock(&dev->struct_mutex);
- list_del(&file_priv->lhead);
- mutex_unlock(&dev->struct_mutex);
-
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 05c97c5350a1..e467e67af6e7 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -327,7 +327,7 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
/* Create a CMA GEM buffer. */
cma_obj = __drm_gem_cma_create(dev, size);
if (IS_ERR(cma_obj))
- return ERR_PTR(PTR_ERR(cma_obj));
+ return ERR_CAST(cma_obj);
cma_obj->paddr = sg_dma_address(sgt->sgl);
cma_obj->sgt = sgt;
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 86feedd5e6f6..ecaf0fa2eec8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -132,7 +132,7 @@ int drm_bufs_info(struct seq_file *m, void *data)
i,
dma->bufs[i].buf_size,
dma->bufs[i].buf_count,
- atomic_read(&dma->bufs[i].freelist.count),
+ 0,
dma->bufs[i].seg_count,
seg_pages,
seg_pages * PAGE_SIZE / 1024);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 69c61f392e66..d3d1a8c72e98 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -1,11 +1,3 @@
-/**
- * \file drm_ioctl.c
- * IOCTL processing for DRM
- *
- * \author Rickard E. (Rik) Faith <[email protected]>
- * \author Gareth Hughes <[email protected]>
- */
-
/*
* Created: Fri Jan 8 09:01:26 1999 by [email protected]
*
@@ -13,6 +5,9 @@
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
+ * Author Rickard E. (Rik) Faith <[email protected]>
+ * Author Gareth Hughes <[email protected]>
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -35,6 +30,7 @@
#include <drm/drmP.h>
#include <drm/drm_core.h>
+#include "drm_legacy.h"
#include <linux/pci.h>
#include <linux/export.h>
@@ -42,6 +38,124 @@
#include <asm/mtrr.h>
#endif
+static int drm_version(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+
+/** Ioctl table */
+static const struct drm_ioctl_desc drm_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+#if __OS_HAS_AGP
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+#endif
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+};
+
+#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE(drm_ioctls)
+
/**
* Get the bus id.
*
@@ -342,8 +456,6 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
file_priv->stereo_allowed = req->value;
break;
case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
- if (!drm_universal_planes)
- return -EINVAL;
if (req->value > 1)
return -EINVAL;
file_priv->universal_planes = req->value;
@@ -417,3 +529,243 @@ int drm_noop(struct drm_device *dev, void *data,
return 0;
}
EXPORT_SYMBOL(drm_noop);
+
+/**
+ * Copy an IOCTL return string to user space
+ */
+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
+{
+ int len;
+
+ /* don't overflow userbuf */
+ len = strlen(value);
+ if (len > *buf_len)
+ len = *buf_len;
+
+ /* let userspace know the exact length of the driver value (which could
+ * be larger than the userspace-supplied buffer) */
+ *buf_len = strlen(value);
+
+ /* finally, try filling in the userbuf */
+ if (len && buf)
+ if (copy_to_user(buf, value, len))
+ return -EFAULT;
+ return 0;
+}
+
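As the comments note, drm_copy_field() implements a two-pass protocol: the caller first passes a zero-length (or NULL) buffer to learn the string length, then calls again with a buffer of that size. A hedged userspace sketch of that pattern against DRM_IOCTL_VERSION (not part of this patch; error handling trimmed to the essentials):

    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    static char *get_driver_name(int fd)
    {
            struct drm_version v;
            char *name;

            memset(&v, 0, sizeof(v));
            if (ioctl(fd, DRM_IOCTL_VERSION, &v))   /* pass 1: lengths only */
                    return NULL;

            name = malloc(v.name_len + 1);
            if (!name)
                    return NULL;
            v.name = name;
            if (ioctl(fd, DRM_IOCTL_VERSION, &v)) { /* pass 2: copy the string */
                    free(name);
                    return NULL;
            }
            name[v.name_len] = '\0';
            return name;
    }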
+/**
+ * Get version information
+ *
+ * \param dev DRM device.
+ * \param data user argument, pointing to a drm_version structure.
+ * \param file_priv DRM file private.
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p data.
+ */
+static int drm_version(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_version *version = data;
+ int err;
+
+ version->version_major = dev->driver->major;
+ version->version_minor = dev->driver->minor;
+ version->version_patchlevel = dev->driver->patchlevel;
+ err = drm_copy_field(version->name, &version->name_len,
+ dev->driver->name);
+ if (!err)
+ err = drm_copy_field(version->date, &version->date_len,
+ dev->driver->date);
+ if (!err)
+ err = drm_copy_field(version->desc, &version->desc_len,
+ dev->driver->desc);
+
+ return err;
+}
+
+/**
+ * drm_ioctl_permit - Check ioctl permissions against caller
+ *
+ * @flags: ioctl permission flags.
+ * @file_priv: Pointer to struct drm_file identifying the caller.
+ *
+ * Checks whether the caller is allowed to run an ioctl with the
+ * indicated permissions. If so, returns zero. Otherwise returns an
+ * error code suitable for ioctl return.
+ */
+static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+{
+ /* ROOT_ONLY is only for CAP_SYS_ADMIN */
+ if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
+ return -EACCES;
+
+ /* AUTH is only for authenticated or render client */
+ if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
+ !file_priv->authenticated))
+ return -EACCES;
+
+ /* MASTER is only for master or control clients */
+ if (unlikely((flags & DRM_MASTER) && !drm_is_master(file_priv) &&
+ !drm_is_control_client(file_priv)))
+ return -EACCES;
+
+ /* Control clients must be explicitly allowed */
+ if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
+ drm_is_control_client(file_priv)))
+ return -EACCES;
+
+ /* Render clients must be explicitly allowed */
+ if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
+ drm_is_render_client(file_priv)))
+ return -EACCES;
+
+ return 0;
+}
+
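To make the flag interplay concrete: a render-node client skips the DRM_AUTH test entirely (it is implicitly trusted for that purpose) but is then rejected by the final test unless the ioctl carries DRM_RENDER_ALLOW. An illustration, assuming the two drm_file pointers exist in the states their names describe:

    /* render_client: opened via a renderD* node; legacy_client: a card*
     * fd that never authenticated against the current master. */
    static void permit_examples(struct drm_file *render_client,
                                struct drm_file *legacy_client)
    {
            /* GET_CAP is DRM_UNLOCKED|DRM_RENDER_ALLOW: permitted. */
            WARN_ON(drm_ioctl_permit(DRM_UNLOCKED | DRM_RENDER_ALLOW,
                                     render_client) != 0);

            /* DRM_AUTH without DRM_RENDER_ALLOW (e.g. GEM_FLINK): the render
             * client passes the AUTH test but fails the RENDER_ALLOW test. */
            WARN_ON(drm_ioctl_permit(DRM_AUTH, render_client) != -EACCES);

            /* The unauthenticated legacy client fails at the AUTH test. */
            WARN_ON(drm_ioctl_permit(DRM_AUTH, legacy_client) != -EACCES);
    }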
+/**
+ * Called whenever a process performs an ioctl on /dev/drm.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ *
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * privileges if so required, and dispatches to the respective function.
+ */
+long drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev;
+ const struct drm_ioctl_desc *ioctl = NULL;
+ drm_ioctl_t *func;
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ int retcode = -EINVAL;
+ char stack_kdata[128];
+ char *kdata = NULL;
+ unsigned int usize, asize;
+
+ dev = file_priv->minor->dev;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+ ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+ goto err_i1;
+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+ (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+ u32 drv_size;
+ ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ drv_size = _IOC_SIZE(ioctl->cmd_drv);
+ usize = asize = _IOC_SIZE(cmd);
+ if (drv_size > asize)
+ asize = drv_size;
+ cmd = ioctl->cmd_drv;
+ } else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+ u32 drv_size;
+
+ ioctl = &drm_ioctls[nr];
+
+ drv_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
+ if (drv_size > asize)
+ asize = drv_size;
+
+ cmd = ioctl->cmd;
+ } else
+ goto err_i1;
+
+ DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, ioctl->name);
+
+ /* Do not trust userspace, use our own definition */
+ func = ioctl->func;
+
+ if (unlikely(!func)) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ goto err_i1;
+ }
+
+ retcode = drm_ioctl_permit(ioctl->flags, file_priv);
+ if (unlikely(retcode))
+ goto err_i1;
+
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
+ }
+ }
+ if (asize > usize)
+ memset(kdata + usize, 0, asize - usize);
+ }
+
+ if (cmd & IOC_IN) {
+ if (copy_from_user(kdata, (void __user *)arg,
+ usize) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+ } else if (cmd & IOC_OUT) {
+ memset(kdata, 0, usize);
+ }
+
+ if (ioctl->flags & DRM_UNLOCKED)
+ retcode = func(dev, kdata, file_priv);
+ else {
+ mutex_lock(&drm_global_mutex);
+ retcode = func(dev, kdata, file_priv);
+ mutex_unlock(&drm_global_mutex);
+ }
+
+ if (cmd & IOC_OUT) {
+ if (copy_to_user((void __user *)arg, kdata,
+ usize) != 0)
+ retcode = -EFAULT;
+ }
+
+ err_i1:
+ if (!ioctl)
+ DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, cmd, nr);
+
+ if (kdata != stack_kdata)
+ kfree(kdata);
+ if (retcode)
+ DRM_DEBUG("ret = %d\n", retcode);
+ return retcode;
+}
+EXPORT_SYMBOL(drm_ioctl);
+
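The dispatcher above routes numbers in [DRM_COMMAND_BASE, DRM_COMMAND_END) to dev->driver->ioctls. A sketch of the driver side, using the existing DRM_IOCTL_DEF_DRV() helper from drmP.h — DRM_IOCTL_FOO_SUBMIT, foo_submit_ioctl and foo_fops are hypothetical names:

    static const struct drm_ioctl_desc foo_ioctls[] = {
            /* nr is relative to DRM_COMMAND_BASE inside this table. */
            DRM_IOCTL_DEF_DRV(FOO_SUBMIT, foo_submit_ioctl,
                              DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
    };

    static struct drm_driver foo_driver = {
            /* ... */
            .ioctls = foo_ioctls,
            .num_ioctls = ARRAY_SIZE(foo_ioctls),
            .fops = &foo_fops,
    };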
+/**
+ * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
+ *
+ * @nr: Ioctl number.
+ * @flags: Where to return the ioctl permission flags
+ */
+bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
+{
+ if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
+ (nr < DRM_COMMAND_BASE)) {
+ *flags = drm_ioctls[nr].flags;
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_ioctl_flags);
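drm_ioctl_flags() exists so that code wrapping drm_ioctl() can inspect the core table's permission bits before dispatch. A hedged sketch (foo_ioctl is hypothetical and the policy shown is purely illustrative):

    static long foo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
    {
            unsigned int nr = DRM_IOCTL_NR(cmd);
            unsigned int flags;

            /* Example policy: refuse core ioctls not marked for render nodes;
             * driver-range numbers return false and fall through. */
            if (drm_ioctl_flags(nr, &flags) && !(flags & DRM_RENDER_ALLOW))
                    return -EACCES;

            return drm_ioctl(filp, cmd, arg);
    }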
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
new file mode 100644
index 000000000000..d34f20a79b7c
--- /dev/null
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -0,0 +1,51 @@
+#ifndef __DRM_LEGACY_H__
+#define __DRM_LEGACY_H__
+
+/*
+ * Copyright (c) 2014 David Herrmann <[email protected]>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+struct drm_device;
+struct drm_file;
+
+/*
+ * Generic DRM Contexts
+ */
+
+#define DRM_KERNEL_CONTEXT 0
+#define DRM_RESERVED_CONTEXTS 1
+
+int drm_legacy_ctxbitmap_init(struct drm_device *dev);
+void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
+void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
+void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
+
+int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
+
+int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
+
+#endif /* __DRM_LEGACY_H__ */
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index f6452682141b..ea1572596578 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -35,6 +35,7 @@
#include <linux/export.h>
#include <drm/drmP.h>
+#include "drm_legacy.h"
static int drm_notifier(void *priv);
@@ -111,7 +112,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
/* don't set the block all signals on the master process for now
* really probably not the correct answer but lets us debug xkb
* xserver for now */
- if (!file_priv->is_master) {
+ if (!drm_is_master(file_priv)) {
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
sigaddset(&dev->sigmask, SIGTSTP);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index e633df2f68d8..6aa6a9e95570 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -201,16 +201,15 @@ EXPORT_SYMBOL(mipi_dsi_detach);
/**
* mipi_dsi_dcs_write - send DCS write command
* @dsi: DSI device
- * @channel: virtual channel
* @data: pointer to the command followed by parameters
* @len: length of @data
*/
-int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel,
- const void *data, size_t len)
+ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
+ size_t len)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
struct mipi_dsi_msg msg = {
- .channel = channel,
+ .channel = dsi->channel,
.tx_buf = data,
.tx_len = len
};
@@ -239,19 +238,18 @@ EXPORT_SYMBOL(mipi_dsi_dcs_write);
/**
* mipi_dsi_dcs_read - send DCS read request command
* @dsi: DSI device
- * @channel: virtual channel
* @cmd: DCS read command
* @data: pointer to read buffer
* @len: length of @data
*
* Function returns number of read bytes or error code.
*/
-ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
- u8 cmd, void *data, size_t len)
+ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
+ size_t len)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
struct mipi_dsi_msg msg = {
- .channel = channel,
+ .channel = dsi->channel,
.type = MIPI_DSI_DCS_READ,
.tx_buf = &cmd,
.tx_len = 1,
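With the virtual channel now taken from the device itself, a panel driver no longer threads a channel argument through every call. A minimal sketch of the updated calling convention — foo_panel_wake() is hypothetical; MIPI_DCS_EXIT_SLEEP_MODE comes from video/mipi_display.h:

    static int foo_panel_wake(struct mipi_dsi_device *dsi)
    {
            u8 cmd = MIPI_DCS_EXIT_SLEEP_MODE;
            ssize_t err;

            /* The command byte leads the buffer; parameters would follow it. */
            err = mipi_dsi_dcs_write(dsi, &cmd, sizeof(cmd));

            return err < 0 ? err : 0;
    }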
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
new file mode 100644
index 000000000000..16150a00c237
--- /dev/null
+++ b/drivers/gpu/drm/drm_of.c
@@ -0,0 +1,67 @@
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/of_graph.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_of.h>
+
+/**
+ * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
+ * @dev: DRM device
+ * @port: port OF node
+ *
+ * Given a port OF node, return the possible mask of the corresponding
+ * CRTC within a device's list of CRTCs. Returns zero if not found.
+ */
+static uint32_t drm_crtc_port_mask(struct drm_device *dev,
+ struct device_node *port)
+{
+ unsigned int index = 0;
+ struct drm_crtc *tmp;
+
+ list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+ if (tmp->port == port)
+ return 1 << index;
+
+ index++;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_of_find_possible_crtcs - find the possible CRTCs for an encoder port
+ * @dev: DRM device
+ * @port: encoder port to scan for endpoints
+ *
+ * Scan all endpoints attached to a port, locate their attached CRTCs,
+ * and generate the DRM mask of CRTCs which may be attached to this
+ * encoder.
+ *
+ * See Documentation/devicetree/bindings/graph.txt for the bindings.
+ */
+uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
+ struct device_node *port)
+{
+ struct device_node *remote_port, *ep = NULL;
+ uint32_t possible_crtcs = 0;
+
+ do {
+ ep = of_graph_get_next_endpoint(port, ep);
+ if (!ep)
+ break;
+
+ remote_port = of_graph_get_remote_port(ep);
+ if (!remote_port) {
+ of_node_put(ep);
+ return 0;
+ }
+
+ possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
+
+ of_node_put(remote_port);
+ } while (1);
+
+ return possible_crtcs;
+}
+EXPORT_SYMBOL(drm_of_find_possible_crtcs);
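A sketch of the intended caller: an encoder derives its possible_crtcs mask from its device-tree port at init time. The foo_* names and the deferral policy are illustrative, not mandated by this patch:

    static const struct drm_encoder_funcs foo_encoder_funcs = {
            .destroy = drm_encoder_cleanup,
    };

    static int foo_encoder_init(struct drm_device *drm,
                                struct drm_encoder *encoder,
                                struct device_node *port)
    {
            encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, port);

            /* A zero mask usually means the CRTCs register later. */
            if (!encoder->possible_crtcs)
                    return -EPROBE_DEFER;

            return drm_encoder_init(drm, encoder, &foo_encoder_funcs,
                                    DRM_MODE_ENCODER_TMDS);
    }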
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 6d133149cc74..827ec1a3040b 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -335,9 +335,10 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
}
/* possible_crtc's will be filled in later by crtc_init */
- ret = drm_plane_init(dev, primary, 0, &drm_primary_helper_funcs,
- formats, num_formats,
- DRM_PLANE_TYPE_PRIMARY);
+ ret = drm_universal_plane_init(dev, primary, 0,
+ &drm_primary_helper_funcs,
+ formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY);
if (ret) {
kfree(primary);
primary = NULL;
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 7047ca025787..631f5afd451c 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -293,3 +293,143 @@ void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
}
EXPORT_SYMBOL(drm_rect_debug_print);
+
+/**
+ * drm_rect_rotate - Rotate the rectangle
+ * @r: rectangle to be rotated
+ * @width: Width of the coordinate space
+ * @height: Height of the coordinate space
+ * @rotation: Transformation to be applied
+ *
+ * Apply @rotation to the coordinates of rectangle @r.
+ *
+ * @width and @height combined with @rotation define
+ * the location of the new origin.
+ *
+ * @width corresponds to the horizontal and @height
+ * to the vertical axis of the untransformed coordinate
+ * space.
+ */
+void drm_rect_rotate(struct drm_rect *r,
+ int width, int height,
+ unsigned int rotation)
+{
+ struct drm_rect tmp;
+
+ if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+ tmp = *r;
+
+ if (rotation & BIT(DRM_REFLECT_X)) {
+ r->x1 = width - tmp.x2;
+ r->x2 = width - tmp.x1;
+ }
+
+ if (rotation & BIT(DRM_REFLECT_Y)) {
+ r->y1 = height - tmp.y2;
+ r->y2 = height - tmp.y1;
+ }
+ }
+
+ switch (rotation & 0xf) {
+ case BIT(DRM_ROTATE_0):
+ break;
+ case BIT(DRM_ROTATE_90):
+ tmp = *r;
+ r->x1 = tmp.y1;
+ r->x2 = tmp.y2;
+ r->y1 = width - tmp.x2;
+ r->y2 = width - tmp.x1;
+ break;
+ case BIT(DRM_ROTATE_180):
+ tmp = *r;
+ r->x1 = width - tmp.x2;
+ r->x2 = width - tmp.x1;
+ r->y1 = height - tmp.y2;
+ r->y2 = height - tmp.y1;
+ break;
+ case BIT(DRM_ROTATE_270):
+ tmp = *r;
+ r->x1 = height - tmp.y2;
+ r->x2 = height - tmp.y1;
+ r->y1 = tmp.x1;
+ r->y2 = tmp.x2;
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(drm_rect_rotate);
+
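A worked example of the transform: a 100x50 rectangle at (10,20) in a 1920x1080 source space, rotated by 90 degrees:

    struct drm_rect r = { .x1 = 10, .y1 = 20, .x2 = 110, .y2 = 70 };

    drm_rect_rotate(&r, 1920, 1080, BIT(DRM_ROTATE_90));
    /* Now r = { .x1 = 20, .y1 = 1810, .x2 = 70, .y2 = 1910 }: the x span
     * came from the old y span, and the y span is width minus the old
     * x span, so the 100x50 source became 50x100. */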
+/**
+ * drm_rect_rotate_inv - Inverse rotate the rectangle
+ * @r: rectangle to be rotated
+ * @width: Width of the coordinate space
+ * @height: Height of the coordinate space
+ * @rotation: Transformation whose inverse is to be applied
+ *
+ * Apply the inverse of @rotation to the coordinates
+ * of rectangle @r.
+ *
+ * @width and @height combined with @rotation define
+ * the location of the new origin.
+ *
+ * @width corresponds to the horizontal and @height
+ * to the vertical axis of the original untransformed
+ * coordinate space, so that you never have to flip
+ * them when doing a rotation and its inverse.
+ * That is, if you do:
+ *
+ * drm_rect_rotate(&r, width, height, rotation);
+ * drm_rect_rotate_inv(&r, width, height, rotation);
+ *
+ * you will always get back the original rectangle.
+ */
+void drm_rect_rotate_inv(struct drm_rect *r,
+ int width, int height,
+ unsigned int rotation)
+{
+ struct drm_rect tmp;
+
+ switch (rotation & 0xf) {
+ case BIT(DRM_ROTATE_0):
+ break;
+ case BIT(DRM_ROTATE_90):
+ tmp = *r;
+ r->x1 = width - tmp.y2;
+ r->x2 = width - tmp.y1;
+ r->y1 = tmp.x1;
+ r->y2 = tmp.x2;
+ break;
+ case BIT(DRM_ROTATE_180):
+ tmp = *r;
+ r->x1 = width - tmp.x2;
+ r->x2 = width - tmp.x1;
+ r->y1 = height - tmp.y2;
+ r->y2 = height - tmp.y1;
+ break;
+ case BIT(DRM_ROTATE_270):
+ tmp = *r;
+ r->x1 = tmp.y1;
+ r->x2 = tmp.y2;
+ r->y1 = height - tmp.x2;
+ r->y2 = height - tmp.x1;
+ break;
+ default:
+ break;
+ }
+
+ if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+ tmp = *r;
+
+ if (rotation & BIT(DRM_REFLECT_X)) {
+ r->x1 = width - tmp.x2;
+ r->x2 = width - tmp.x1;
+ }
+
+ if (rotation & BIT(DRM_REFLECT_Y)) {
+ r->y1 = height - tmp.y2;
+ r->y2 = height - tmp.y1;
+ }
+ }
+}
+EXPORT_SYMBOL(drm_rect_rotate_inv);
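The round-trip guarantee from drm_rect_rotate_inv()'s documentation, checked on a concrete rectangle (drm_rect_equals() is the existing helper from drm_rect.h):

    struct drm_rect r = { .x1 = 10, .y1 = 20, .x2 = 110, .y2 = 70 };
    const struct drm_rect saved = r;
    unsigned int rotation = BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_X);

    drm_rect_rotate(&r, 1920, 1080, rotation);
    drm_rect_rotate_inv(&r, 1920, 1080, rotation);
    WARN_ON(!drm_rect_equals(&r, &saved));      /* always passes */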
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
deleted file mode 100644
index 4e862b467740..000000000000
--- a/drivers/gpu/drm/drm_stub.c
+++ /dev/null
@@ -1,797 +0,0 @@
-/*
- * Created: Fri Jan 19 10:48:35 2001 by [email protected]
- *
- * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Author Rickard E. (Rik) Faith <[email protected]>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mount.h>
-#include <linux/slab.h>
-#include <drm/drmP.h>
-#include <drm/drm_core.h>
-
-unsigned int drm_debug = 0; /* 1 to enable debug output */
-EXPORT_SYMBOL(drm_debug);
-
-/* 1 to allow user space to request universal planes (experimental) */
-unsigned int drm_universal_planes = 0;
-
-unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
-
-unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
-
-/*
- * Default to use monotonic timestamps for wait-for-vblank and page-flip
- * complete events.
- */
-unsigned int drm_timestamp_monotonic = 1;
-
-MODULE_AUTHOR(CORE_AUTHOR);
-MODULE_DESCRIPTION(CORE_DESC);
-MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(debug, "Enable debug output");
-MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
-MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
-MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
-
-module_param_named(debug, drm_debug, int, 0600);
-module_param_named(universal_planes, drm_universal_planes, int, 0600);
-module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
-module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
-module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
-
-static DEFINE_SPINLOCK(drm_minor_lock);
-struct idr drm_minors_idr;
-
-struct class *drm_class;
-struct dentry *drm_debugfs_root;
-
-int drm_err(const char *func, const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
- int r;
-
- va_start(args, format);
-
- vaf.fmt = format;
- vaf.va = &args;
-
- r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
-
- va_end(args);
-
- return r;
-}
-EXPORT_SYMBOL(drm_err);
-
-void drm_ut_debug_printk(const char *function_name, const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, format);
- vaf.fmt = format;
- vaf.va = &args;
-
- printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(drm_ut_debug_printk);
-
-struct drm_master *drm_master_create(struct drm_minor *minor)
-{
- struct drm_master *master;
-
- master = kzalloc(sizeof(*master), GFP_KERNEL);
- if (!master)
- return NULL;
-
- kref_init(&master->refcount);
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
- if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
- kfree(master);
- return NULL;
- }
- INIT_LIST_HEAD(&master->magicfree);
- master->minor = minor;
-
- return master;
-}
-
-struct drm_master *drm_master_get(struct drm_master *master)
-{
- kref_get(&master->refcount);
- return master;
-}
-EXPORT_SYMBOL(drm_master_get);
-
-static void drm_master_destroy(struct kref *kref)
-{
- struct drm_master *master = container_of(kref, struct drm_master, refcount);
- struct drm_magic_entry *pt, *next;
- struct drm_device *dev = master->minor->dev;
- struct drm_map_list *r_list, *list_temp;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->master_destroy)
- dev->driver->master_destroy(dev, master);
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
- if (r_list->master == master) {
- drm_rmmap_locked(dev, r_list->map);
- r_list = NULL;
- }
- }
-
- if (master->unique) {
- kfree(master->unique);
- master->unique = NULL;
- master->unique_len = 0;
- }
-
- list_for_each_entry_safe(pt, next, &master->magicfree, head) {
- list_del(&pt->head);
- drm_ht_remove_item(&master->magiclist, &pt->hash_item);
- kfree(pt);
- }
-
- drm_ht_remove(&master->magiclist);
-
- mutex_unlock(&dev->struct_mutex);
- kfree(master);
-}
-
-void drm_master_put(struct drm_master **master)
-{
- kref_put(&(*master)->refcount, drm_master_destroy);
- *master = NULL;
-}
-EXPORT_SYMBOL(drm_master_put);
-
-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret = 0;
-
- mutex_lock(&dev->master_mutex);
- if (file_priv->is_master)
- goto out_unlock;
-
- if (file_priv->minor->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (!file_priv->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
- }
-
-out_unlock:
- mutex_unlock(&dev->master_mutex);
- return ret;
-}
-
-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret = -EINVAL;
-
- mutex_lock(&dev->master_mutex);
- if (!file_priv->is_master)
- goto out_unlock;
-
- if (!file_priv->minor->master)
- goto out_unlock;
-
- ret = 0;
- if (dev->driver->master_drop)
- dev->driver->master_drop(dev, file_priv, false);
- drm_master_put(&file_priv->minor->master);
- file_priv->is_master = 0;
-
-out_unlock:
- mutex_unlock(&dev->master_mutex);
- return ret;
-}
-
-/*
- * DRM Minors
- * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
- * of them is represented by a drm_minor object. Depending on the capabilities
- * of the device-driver, different interfaces are registered.
- *
- * Minors can be accessed via dev->$minor_name. This pointer is either
- * NULL or a valid drm_minor pointer and stays valid as long as the device is
- * valid. This means, DRM minors have the same life-time as the underlying
- * device. However, this doesn't mean that the minor is active. Minors are
- * registered and unregistered dynamically according to device-state.
- */
-
-static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
- unsigned int type)
-{
- switch (type) {
- case DRM_MINOR_LEGACY:
- return &dev->primary;
- case DRM_MINOR_RENDER:
- return &dev->render;
- case DRM_MINOR_CONTROL:
- return &dev->control;
- default:
- return NULL;
- }
-}
-
-static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor *minor;
-
- minor = kzalloc(sizeof(*minor), GFP_KERNEL);
- if (!minor)
- return -ENOMEM;
-
- minor->type = type;
- minor->dev = dev;
-
- *drm_minor_get_slot(dev, type) = minor;
- return 0;
-}
-
-static void drm_minor_free(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor **slot;
-
- slot = drm_minor_get_slot(dev, type);
- if (*slot) {
- drm_mode_group_destroy(&(*slot)->mode_group);
- kfree(*slot);
- *slot = NULL;
- }
-}
-
-static int drm_minor_register(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor *new_minor;
- unsigned long flags;
- int ret;
- int minor_id;
-
- DRM_DEBUG("\n");
-
- new_minor = *drm_minor_get_slot(dev, type);
- if (!new_minor)
- return 0;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(&drm_minor_lock, flags);
- minor_id = idr_alloc(&drm_minors_idr,
- NULL,
- 64 * type,
- 64 * (type + 1),
- GFP_NOWAIT);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- idr_preload_end();
-
- if (minor_id < 0)
- return minor_id;
-
- new_minor->index = minor_id;
-
- ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
- if (ret) {
- DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- goto err_id;
- }
-
- ret = drm_sysfs_device_add(new_minor);
- if (ret) {
- DRM_ERROR("DRM: Error sysfs_device_add.\n");
- goto err_debugfs;
- }
-
- /* replace NULL with @minor so lookups will succeed from now on */
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_replace(&drm_minors_idr, new_minor, new_minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
-
- DRM_DEBUG("new minor assigned %d\n", minor_id);
- return 0;
-
-err_debugfs:
- drm_debugfs_cleanup(new_minor);
-err_id:
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor_id);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- new_minor->index = 0;
- return ret;
-}
-
-static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor *minor;
- unsigned long flags;
-
- minor = *drm_minor_get_slot(dev, type);
- if (!minor || !minor->kdev)
- return;
-
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- minor->index = 0;
-
- drm_debugfs_cleanup(minor);
- drm_sysfs_device_remove(minor);
-}
-
-/**
- * drm_minor_acquire - Acquire a DRM minor
- * @minor_id: Minor ID of the DRM-minor
- *
- * Looks up the given minor-ID and returns the respective DRM-minor object. The
- * refence-count of the underlying device is increased so you must release this
- * object with drm_minor_release().
- *
- * As long as you hold this minor, it is guaranteed that the object and the
- * minor->dev pointer will stay valid! However, the device may get unplugged and
- * unregistered while you hold the minor.
- *
- * Returns:
- * Pointer to minor-object with increased device-refcount, or PTR_ERR on
- * failure.
- */
-struct drm_minor *drm_minor_acquire(unsigned int minor_id)
-{
- struct drm_minor *minor;
- unsigned long flags;
-
- spin_lock_irqsave(&drm_minor_lock, flags);
- minor = idr_find(&drm_minors_idr, minor_id);
- if (minor)
- drm_dev_ref(minor->dev);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
-
- if (!minor) {
- return ERR_PTR(-ENODEV);
- } else if (drm_device_is_unplugged(minor->dev)) {
- drm_dev_unref(minor->dev);
- return ERR_PTR(-ENODEV);
- }
-
- return minor;
-}
-
-/**
- * drm_minor_release - Release DRM minor
- * @minor: Pointer to DRM minor object
- *
- * Release a minor that was previously acquired via drm_minor_acquire().
- */
-void drm_minor_release(struct drm_minor *minor)
-{
- drm_dev_unref(minor->dev);
-}
-
-/**
- * drm_put_dev - Unregister and release a DRM device
- * @dev: DRM device
- *
- * Called at module unload time or when a PCI device is unplugged.
- *
- * Use of this function is discouraged. It will eventually go away completely.
- * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
- *
- * Cleans up all DRM device, calling drm_lastclose().
- */
-void drm_put_dev(struct drm_device *dev)
-{
- DRM_DEBUG("\n");
-
- if (!dev) {
- DRM_ERROR("cleanup called no dev\n");
- return;
- }
-
- drm_dev_unregister(dev);
- drm_dev_unref(dev);
-}
-EXPORT_SYMBOL(drm_put_dev);
-
-void drm_unplug_dev(struct drm_device *dev)
-{
- /* for a USB device */
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
-
- mutex_lock(&drm_global_mutex);
-
- drm_device_set_unplugged(dev);
-
- if (dev->open_count == 0) {
- drm_put_dev(dev);
- }
- mutex_unlock(&drm_global_mutex);
-}
-EXPORT_SYMBOL(drm_unplug_dev);
-
-/*
- * DRM internal mount
- * We want to be able to allocate our own "struct address_space" to control
- * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
- * stand-alone address_space objects, so we need an underlying inode. As there
- * is no way to allocate an independent inode easily, we need a fake internal
- * VFS mount-point.
- *
- * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
- * frees it again. You are allowed to use iget() and iput() to get references to
- * the inode. But each drm_fs_inode_new() call must be paired with exactly one
- * drm_fs_inode_free() call (which does not have to be the last iput()).
- * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
- * between multiple inode-users. You could, technically, call
- * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
- * iput(), but this way you'd end up with a new vfsmount for each inode.
- */
-
-static int drm_fs_cnt;
-static struct vfsmount *drm_fs_mnt;
-
-static const struct dentry_operations drm_fs_dops = {
- .d_dname = simple_dname,
-};
-
-static const struct super_operations drm_fs_sops = {
- .statfs = simple_statfs,
-};
-
-static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- return mount_pseudo(fs_type,
- "drm:",
- &drm_fs_sops,
- &drm_fs_dops,
- 0x010203ff);
-}
-
-static struct file_system_type drm_fs_type = {
- .name = "drm",
- .owner = THIS_MODULE,
- .mount = drm_fs_mount,
- .kill_sb = kill_anon_super,
-};
-
-static struct inode *drm_fs_inode_new(void)
-{
- struct inode *inode;
- int r;
-
- r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
- if (r < 0) {
- DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
- return ERR_PTR(r);
- }
-
- inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
- if (IS_ERR(inode))
- simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
-
- return inode;
-}
-
-static void drm_fs_inode_free(struct inode *inode)
-{
- if (inode) {
- iput(inode);
- simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
- }
-}
-
-/**
- * drm_dev_alloc - Allocate new DRM device
- * @driver: DRM driver to allocate device for
- * @parent: Parent device object
- *
- * Allocate and initialize a new DRM device. No device registration is done.
- * Call drm_dev_register() to advertice the device to user space and register it
- * with other core subsystems.
- *
- * The initial ref-count of the object is 1. Use drm_dev_ref() and
- * drm_dev_unref() to take and drop further ref-counts.
- *
- * RETURNS:
- * Pointer to new DRM device, or NULL if out of memory.
- */
-struct drm_device *drm_dev_alloc(struct drm_driver *driver,
- struct device *parent)
-{
- struct drm_device *dev;
- int ret;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return NULL;
-
- kref_init(&dev->ref);
- dev->dev = parent;
- dev->driver = driver;
-
- INIT_LIST_HEAD(&dev->filelist);
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
- INIT_LIST_HEAD(&dev->vblank_event_list);
-
- spin_lock_init(&dev->buf_lock);
- spin_lock_init(&dev->event_lock);
- mutex_init(&dev->struct_mutex);
- mutex_init(&dev->ctxlist_mutex);
- mutex_init(&dev->master_mutex);
-
- dev->anon_inode = drm_fs_inode_new();
- if (IS_ERR(dev->anon_inode)) {
- ret = PTR_ERR(dev->anon_inode);
- DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
- goto err_free;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
- if (ret)
- goto err_minors;
- }
-
- if (drm_core_check_feature(dev, DRIVER_RENDER)) {
- ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
- if (ret)
- goto err_minors;
- }
-
- ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
- if (ret)
- goto err_minors;
-
- if (drm_ht_create(&dev->map_hash, 12))
- goto err_minors;
-
- ret = drm_ctxbitmap_init(dev);
- if (ret) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- goto err_ht;
- }
-
- if (driver->driver_features & DRIVER_GEM) {
- ret = drm_gem_init(dev);
- if (ret) {
- DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
- goto err_ctxbitmap;
- }
- }
-
- return dev;
-
-err_ctxbitmap:
- drm_ctxbitmap_cleanup(dev);
-err_ht:
- drm_ht_remove(&dev->map_hash);
-err_minors:
- drm_minor_free(dev, DRM_MINOR_LEGACY);
- drm_minor_free(dev, DRM_MINOR_RENDER);
- drm_minor_free(dev, DRM_MINOR_CONTROL);
- drm_fs_inode_free(dev->anon_inode);
-err_free:
- mutex_destroy(&dev->master_mutex);
- kfree(dev);
- return NULL;
-}
-EXPORT_SYMBOL(drm_dev_alloc);
-
-static void drm_dev_release(struct kref *ref)
-{
- struct drm_device *dev = container_of(ref, struct drm_device, ref);
-
- if (dev->driver->driver_features & DRIVER_GEM)
- drm_gem_destroy(dev);
-
- drm_ctxbitmap_cleanup(dev);
- drm_ht_remove(&dev->map_hash);
- drm_fs_inode_free(dev->anon_inode);
-
- drm_minor_free(dev, DRM_MINOR_LEGACY);
- drm_minor_free(dev, DRM_MINOR_RENDER);
- drm_minor_free(dev, DRM_MINOR_CONTROL);
-
- mutex_destroy(&dev->master_mutex);
- kfree(dev->unique);
- kfree(dev);
-}
-
-/**
- * drm_dev_ref - Take reference of a DRM device
- * @dev: device to take reference of or NULL
- *
- * This increases the ref-count of @dev by one. You *must* already own a
- * reference when calling this. Use drm_dev_unref() to drop this reference
- * again.
- *
- * This function never fails. However, this function does not provide *any*
- * guarantee whether the device is alive or running. It only provides a
- * reference to the object and the memory associated with it.
- */
-void drm_dev_ref(struct drm_device *dev)
-{
- if (dev)
- kref_get(&dev->ref);
-}
-EXPORT_SYMBOL(drm_dev_ref);
-
-/**
- * drm_dev_unref - Drop reference of a DRM device
- * @dev: device to drop reference of or NULL
- *
- * This decreases the ref-count of @dev by one. The device is destroyed if the
- * ref-count drops to zero.
- */
-void drm_dev_unref(struct drm_device *dev)
-{
- if (dev)
- kref_put(&dev->ref, drm_dev_release);
-}
-EXPORT_SYMBOL(drm_dev_unref);
-
-/**
- * drm_dev_register - Register DRM device
- * @dev: Device to register
- * @flags: Flags passed to the driver's .load() function
- *
- * Register the DRM device @dev with the system, advertise device to user-space
- * and start normal device operation. @dev must be allocated via drm_dev_alloc()
- * previously.
- *
- * Never call this twice on any device!
- *
- * RETURNS:
- * 0 on success, negative error code on failure.
- */
-int drm_dev_register(struct drm_device *dev, unsigned long flags)
-{
- int ret;
-
- mutex_lock(&drm_global_mutex);
-
- ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
- if (ret)
- goto err_minors;
-
- ret = drm_minor_register(dev, DRM_MINOR_RENDER);
- if (ret)
- goto err_minors;
-
- ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
- if (ret)
- goto err_minors;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, flags);
- if (ret)
- goto err_minors;
- }
-
- /* setup grouping for legacy outputs */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_unload;
- }
-
- ret = 0;
- goto out_unlock;
-
-err_unload:
- if (dev->driver->unload)
- dev->driver->unload(dev);
-err_minors:
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
-out_unlock:
- mutex_unlock(&drm_global_mutex);
- return ret;
-}
-EXPORT_SYMBOL(drm_dev_register);
-
-/**
- * drm_dev_unregister - Unregister DRM device
- * @dev: Device to unregister
- *
- * Unregister the DRM device from the system. This does the reverse of
- * drm_dev_register() but does not deallocate the device. The caller must call
- * drm_dev_unref() to drop their final reference.
- */
-void drm_dev_unregister(struct drm_device *dev)
-{
- struct drm_map_list *r_list, *list_temp;
-
- drm_lastclose(dev);
-
- if (dev->driver->unload)
- dev->driver->unload(dev);
-
- if (dev->agp)
- drm_pci_agp_destroy(dev);
-
- drm_vblank_cleanup(dev);
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_rmmap(dev, r_list->map);
-
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
-}
-EXPORT_SYMBOL(drm_dev_unregister);
-
-/**
- * drm_dev_set_unique - Set the unique name of a DRM device
- * @dev: device of which to set the unique name
- * @fmt: format string for unique name
- *
- * Sets the unique name of a DRM device using the specified format string and
- * a variable list of arguments. Drivers can use this at driver probe time if
- * the unique name of the devices they drive is static.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
-{
- va_list ap;
-
- kfree(dev->unique);
-
- va_start(ap, fmt);
- dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
- va_end(ap);
-
- return dev->unique ? 0 : -ENOMEM;
-}
-EXPORT_SYMBOL(drm_dev_set_unique);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 7827dad8fcf4..ab1a5f6dde8a 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -493,71 +493,55 @@ static void drm_sysfs_release(struct device *dev)
}
/**
- * drm_sysfs_device_add - adds a class device to sysfs for a character driver
- * @dev: DRM device to be added
- * @head: DRM head in question
+ * drm_sysfs_minor_alloc() - Allocate sysfs device for given minor
+ * @minor: minor to allocate sysfs device for
*
- * Add a DRM device to the DRM's device model class. We use @dev's PCI device
- * as the parent for the Linux device, and make sure it has a file containing
- * the driver we're using (for userspace compatibility).
+ * This allocates a new sysfs device for @minor and returns it. The device is
+ * neither registered nor linked. The caller has to use device_add() and
+ * device_del() to register and unregister it.
+ *
+ * Note that dev_get_drvdata() on the new device will return the minor.
+ * However, the device does not hold a ref-count to the minor nor to the
+ * underlying drm_device. This is unproblematic as long as you access the
+ * private data only in sysfs callbacks. device_del() disables those
+ * synchronously, so they cannot be called after you clean up a minor.
*/
-int drm_sysfs_device_add(struct drm_minor *minor)
+struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
- char *minor_str;
+ const char *minor_str;
+ struct device *kdev;
int r;
if (minor->type == DRM_MINOR_CONTROL)
minor_str = "controlD%d";
- else if (minor->type == DRM_MINOR_RENDER)
- minor_str = "renderD%d";
- else
- minor_str = "card%d";
-
- minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL);
- if (!minor->kdev) {
- r = -ENOMEM;
- goto error;
- }
-
- device_initialize(minor->kdev);
- minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index);
- minor->kdev->class = drm_class;
- minor->kdev->type = &drm_sysfs_device_minor;
- minor->kdev->parent = minor->dev->dev;
- minor->kdev->release = drm_sysfs_release;
- dev_set_drvdata(minor->kdev, minor);
-
- r = dev_set_name(minor->kdev, minor_str, minor->index);
+ else if (minor->type == DRM_MINOR_RENDER)
+ minor_str = "renderD%d";
+ else
+ minor_str = "card%d";
+
+ kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+ if (!kdev)
+ return ERR_PTR(-ENOMEM);
+
+ device_initialize(kdev);
+ kdev->devt = MKDEV(DRM_MAJOR, minor->index);
+ kdev->class = drm_class;
+ kdev->type = &drm_sysfs_device_minor;
+ kdev->parent = minor->dev->dev;
+ kdev->release = drm_sysfs_release;
+ dev_set_drvdata(kdev, minor);
+
+ r = dev_set_name(kdev, minor_str, minor->index);
if (r < 0)
- goto error;
-
- r = device_add(minor->kdev);
- if (r < 0)
- goto error;
-
- return 0;
+ goto err_free;
-error:
- DRM_ERROR("device create failed %d\n", r);
- put_device(minor->kdev);
- return r;
-}
+ return kdev;
-/**
- * drm_sysfs_device_remove - remove DRM device
- * @dev: DRM device to remove
- *
- * This call unregisters and cleans up a class device that was created with a
- * call to drm_sysfs_device_add()
- */
-void drm_sysfs_device_remove(struct drm_minor *minor)
-{
- if (minor->kdev)
- device_unregister(minor->kdev);
- minor->kdev = NULL;
+err_free:
+ put_device(kdev);
+ return ERR_PTR(r);
}
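The caller pattern implied by the new contract — allocate here, register with device_add(), and drop the last reference with put_device() — would look roughly like this sketch, not taken from this patch:

    struct device *kdev;
    int ret;

    kdev = drm_sysfs_minor_alloc(minor);
    if (IS_ERR(kdev))
            return PTR_ERR(kdev);

    ret = device_add(kdev);
    if (ret) {
            put_device(kdev);       /* release() frees the allocation */
            return ret;
    }
    minor->kdev = kdev;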
-
/**
* drm_class_device_register - Register a struct device in the drm class.
*
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 178d2a9672a8..7f9f6f9e9b7e 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -28,6 +28,7 @@ config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
depends on DRM_EXYNOS && !FB_S3C
select FB_MODE_HELPERS
+ select MFD_SYSCON
help
Choose this option if you want to use Exynos FIMD for DRM.
@@ -52,6 +53,7 @@ config DRM_EXYNOS_DP
bool "EXYNOS DRM DP driver support"
depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
default DRM_EXYNOS
+ select DRM_PANEL
help
This enables support for DP device.
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 86dc69d9eabb..4f3c7eb2d37d 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -16,7 +16,6 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
-#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
@@ -28,6 +27,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
#include <drm/bridge/ptn3460.h>
#include "exynos_drm_drv.h"
@@ -41,7 +41,7 @@ struct bridge_init {
struct device_node *node;
};
-static int exynos_dp_init_dp(struct exynos_dp_device *dp)
+static void exynos_dp_init_dp(struct exynos_dp_device *dp)
{
exynos_dp_reset(dp);
@@ -58,8 +58,6 @@ static int exynos_dp_init_dp(struct exynos_dp_device *dp)
exynos_dp_init_hpd(dp);
exynos_dp_init_aux(dp);
-
- return 0;
}
static int exynos_dp_detect_hpd(struct exynos_dp_device *dp)
@@ -875,10 +873,24 @@ static irqreturn_t exynos_dp_irq_handler(int irq, void *arg)
static void exynos_dp_hotplug(struct work_struct *work)
{
struct exynos_dp_device *dp;
- int ret;
dp = container_of(work, struct exynos_dp_device, hotplug_work);
+ if (dp->drm_dev)
+ drm_helper_hpd_irq_event(dp->drm_dev);
+}
+
+static void exynos_dp_commit(struct exynos_drm_display *display)
+{
+ struct exynos_dp_device *dp = display->ctx;
+ int ret;
+
+ /* Keep the panel disabled while we configure video */
+ if (dp->panel) {
+ if (drm_panel_disable(dp->panel))
+ DRM_ERROR("failed to disable the panel\n");
+ }
+
ret = exynos_dp_detect_hpd(dp);
if (ret) {
/* Cable has been disconnected, we're done */
@@ -909,6 +921,12 @@ static void exynos_dp_hotplug(struct work_struct *work)
ret = exynos_dp_config_video(dp);
if (ret)
dev_err(dp->dev, "unable to config video\n");
+
+ /* Safe to enable the panel now */
+ if (dp->panel) {
+ if (drm_panel_enable(dp->panel))
+ DRM_ERROR("failed to enable the panel\n");
+ }
}
static enum drm_connector_status exynos_dp_detect(
@@ -933,15 +951,18 @@ static int exynos_dp_get_modes(struct drm_connector *connector)
struct exynos_dp_device *dp = ctx_from_connector(connector);
struct drm_display_mode *mode;
+ if (dp->panel)
+ return drm_panel_get_modes(dp->panel);
+
mode = drm_mode_create(connector->dev);
if (!mode) {
DRM_ERROR("failed to create a new display mode.\n");
return 0;
}
- drm_display_mode_from_videomode(&dp->panel.vm, mode);
- mode->width_mm = dp->panel.width_mm;
- mode->height_mm = dp->panel.height_mm;
+ drm_display_mode_from_videomode(&dp->priv.vm, mode);
+ mode->width_mm = dp->priv.width_mm;
+ mode->height_mm = dp->priv.height_mm;
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
@@ -1021,7 +1042,10 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
+ if (dp->panel)
+ ret = drm_panel_attach(dp->panel, &dp->connector);
+
+ return ret;
}
static void exynos_dp_phy_init(struct exynos_dp_device *dp)
@@ -1050,26 +1074,50 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
}
}
-static void exynos_dp_poweron(struct exynos_dp_device *dp)
+static void exynos_dp_poweron(struct exynos_drm_display *display)
{
+ struct exynos_dp_device *dp = display->ctx;
+
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
return;
+ if (dp->panel) {
+ if (drm_panel_prepare(dp->panel)) {
+ DRM_ERROR("failed to setup the panel\n");
+ return;
+ }
+ }
+
clk_prepare_enable(dp->clock);
exynos_dp_phy_init(dp);
exynos_dp_init_dp(dp);
enable_irq(dp->irq);
+ exynos_dp_commit(display);
}
-static void exynos_dp_poweroff(struct exynos_dp_device *dp)
+static void exynos_dp_poweroff(struct exynos_drm_display *display)
{
+ struct exynos_dp_device *dp = display->ctx;
+
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
+ if (dp->panel) {
+ if (drm_panel_disable(dp->panel)) {
+ DRM_ERROR("failed to disable the panel\n");
+ return;
+ }
+ }
+
disable_irq(dp->irq);
flush_work(&dp->hotplug_work);
exynos_dp_phy_exit(dp);
clk_disable_unprepare(dp->clock);
+
+ if (dp->panel) {
+ if (drm_panel_unprepare(dp->panel))
+ DRM_ERROR("failed to turnoff the panel\n");
+ }
}
static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
@@ -1078,12 +1126,12 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- exynos_dp_poweron(dp);
+ exynos_dp_poweron(display);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- exynos_dp_poweroff(dp);
+ exynos_dp_poweroff(display);
break;
default:
break;
@@ -1094,6 +1142,7 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
static struct exynos_drm_display_ops exynos_dp_display_ops = {
.create_connector = exynos_dp_create_connector,
.dpms = exynos_dp_dpms,
+ .commit = exynos_dp_commit,
};
static struct exynos_drm_display exynos_dp_display = {
@@ -1201,7 +1250,7 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
{
int ret;
- ret = of_get_videomode(dp->dev->of_node, &dp->panel.vm,
+ ret = of_get_videomode(dp->dev->of_node, &dp->priv.vm,
OF_USE_NATIVE_MODE);
if (ret) {
DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
@@ -1215,16 +1264,10 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm_dev = data;
struct resource *res;
- struct exynos_dp_device *dp;
+ struct exynos_dp_device *dp = exynos_dp_display.ctx;
unsigned int irq_flags;
-
int ret = 0;
- dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
- GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
-
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
@@ -1236,9 +1279,11 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- ret = exynos_dp_dt_parse_panel(dp);
- if (ret)
- return ret;
+ if (!dp->panel) {
+ ret = exynos_dp_dt_parse_panel(dp);
+ if (ret)
+ return ret;
+ }
dp->clock = devm_clk_get(&pdev->dev, "dp");
if (IS_ERR(dp->clock)) {
@@ -1298,7 +1343,6 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
disable_irq(dp->irq);
dp->drm_dev = drm_dev;
- exynos_dp_display.ctx = dp;
platform_set_drvdata(pdev, &exynos_dp_display);
@@ -1325,6 +1369,9 @@ static const struct component_ops exynos_dp_ops = {
static int exynos_dp_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *panel_node;
+ struct exynos_dp_device *dp;
int ret;
ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
@@ -1332,6 +1379,21 @@ static int exynos_dp_probe(struct platform_device *pdev)
if (ret)
return ret;
+ dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
+ GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ panel_node = of_parse_phandle(dev->of_node, "panel", 0);
+ if (panel_node) {
+ dp->panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!dp->panel)
+ return -EPROBE_DEFER;
+ }
+
+ exynos_dp_display.ctx = dp;
+
ret = component_add(&pdev->dev, &exynos_dp_ops);
if (ret)
exynos_drm_component_del(&pdev->dev,
@@ -1376,6 +1438,7 @@ static const struct of_device_id exynos_dp_match[] = {
{ .compatible = "samsung,exynos5-dp" },
{},
};
+MODULE_DEVICE_TABLE(of, exynos_dp_match);
struct platform_driver dp_driver = {
.probe = exynos_dp_probe,
@@ -1390,4 +1453,4 @@ struct platform_driver dp_driver = {
MODULE_AUTHOR("Jingoo Han <[email protected]>");
MODULE_DESCRIPTION("Samsung SoC DP Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index 02cc4f9ab903..a1aee6931bd7 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -149,6 +149,7 @@ struct exynos_dp_device {
struct drm_device *drm_dev;
struct drm_connector connector;
struct drm_encoder *encoder;
+ struct drm_panel *panel;
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
@@ -162,7 +163,7 @@ struct exynos_dp_device {
int dpms_mode;
int hpd_gpio;
- struct exynos_drm_panel_info panel;
+ struct exynos_drm_panel_info priv;
};
/* exynos_dp_reg.c */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 25c788832e2e..ba9b3d5ed672 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -117,20 +117,7 @@ static struct drm_encoder *exynos_drm_best_encoder(
struct drm_device *dev = connector->dev;
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
- obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
- exynos_connector->encoder_id);
- return NULL;
- }
-
- encoder = obj_to_encoder(obj);
-
- return encoder;
+ return drm_encoder_find(dev, exynos_connector->encoder_id);
}
static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
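
For reference, drm_encoder_find() is essentially the open-coded lookup that the hunk above removes; a sketch of the equivalence:

static struct drm_encoder *example_encoder_find(struct drm_device *dev,
						uint32_t id)
{
	struct drm_mode_object *obj;

	/* look the id up in the mode object registry, filtered by type */
	obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);

	return obj ? obj_to_encoder(obj) : NULL;
}
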
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 95c9435d0266..b68e58f78cd1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -69,8 +69,10 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
if (mode > DRM_MODE_DPMS_ON) {
/* wait for the completion of page flip. */
- wait_event(exynos_crtc->pending_flip_queue,
- atomic_read(&exynos_crtc->pending_flip) == 0);
+ if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
+ !atomic_read(&exynos_crtc->pending_flip),
+ HZ/20))
+ atomic_set(&exynos_crtc->pending_flip, 0);
drm_vblank_off(crtc->dev, exynos_crtc->pipe);
}
@@ -259,6 +261,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
spin_lock_irq(&dev->event_lock);
drm_vblank_put(dev, exynos_crtc->pipe);
list_del(&event->base.link);
+ atomic_set(&exynos_crtc->pending_flip, 0);
spin_unlock_irq(&dev->event_lock);
goto out;
@@ -508,3 +511,11 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
return -EPERM;
}
+
+void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
+{
+ struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+ if (manager->ops->te_handler)
+ manager->ops->te_handler(manager);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 9f74b10a8a01..690dcddab725 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -36,4 +36,11 @@ void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);
int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
unsigned int out_type);
+/*
+ * This function calls the CRTC device (manager)'s te_handler() callback
+ * to trigger a video image transfer at the tearing effect (TE)
+ * synchronization signal.
+ */
+void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
+
#endif
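
To connect the pieces: a manager driving a command-mode panel fills in .te_handler, and exynos_drm_crtc_te_handler() dispatches to it. A stripped-down sketch, with the ops struct reduced to the relevant field and hypothetical names:

static void example_te_handler(struct exynos_drm_manager *mgr)
{
	/* trigger the image transfer for this TE pulse */
}

static struct exynos_drm_manager_ops example_manager_ops = {
	.te_handler = example_te_handler,
};

/* An IRQ handler wired to the TE signal then simply forwards to the
 * CRTC, much as the DSI driver does later in this series: */
static irqreturn_t example_te_irq(int irq, void *dev_id)
{
	struct drm_crtc *crtc = dev_id;

	exynos_drm_crtc_te_handler(crtc);
	return IRQ_HANDLED;
}
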
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 3aa1c7ebbfcc..fa08f05e3e34 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -125,14 +125,18 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
static void exynos_dpi_poweron(struct exynos_dpi *ctx)
{
- if (ctx->panel)
+ if (ctx->panel) {
+ drm_panel_prepare(ctx->panel);
drm_panel_enable(ctx->panel);
+ }
}
static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
{
- if (ctx->panel)
+ if (ctx->panel) {
drm_panel_disable(ctx->panel);
+ drm_panel_unprepare(ctx->panel);
+ }
}
static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index d82e3cb8a70d..0d74e9b99c4e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -358,7 +358,7 @@ static int exynos_drm_sys_suspend(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
pm_message_t message;
- if (pm_runtime_suspended(dev))
+ if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
message.event = PM_EVENT_SUSPEND;
@@ -369,7 +369,7 @@ static int exynos_drm_sys_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- if (pm_runtime_suspended(dev))
+ if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
return exynos_drm_resume(drm_dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 02f3b3dcb9f8..69a6fa397d75 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -186,6 +186,8 @@ struct exynos_drm_display {
* @win_commit: apply hardware specific overlay data to registers.
* @win_enable: enable hardware specific overlay.
* @win_disable: disable hardware specific overlay.
+ * @te_handler: trigger a video image transfer at the tearing effect
+ * synchronization signal if there is a page flip request.
*/
struct exynos_drm_manager;
struct exynos_drm_manager_ops {
@@ -204,6 +206,7 @@ struct exynos_drm_manager_ops {
void (*win_commit)(struct exynos_drm_manager *mgr, int zpos);
void (*win_enable)(struct exynos_drm_manager *mgr, int zpos);
void (*win_disable)(struct exynos_drm_manager *mgr, int zpos);
+ void (*te_handler)(struct exynos_drm_manager *mgr);
};
/*
@@ -234,14 +237,9 @@ struct exynos_drm_g2d_private {
struct list_head userptr_list;
};
-struct exynos_drm_ipp_private {
- struct device *dev;
- struct list_head event_list;
-};
-
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
- struct exynos_drm_ipp_private *ipp_priv;
+ struct device *ipp_dev;
struct file *anon_filp;
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 2df3592166de..442aa2d00132 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -16,7 +16,10 @@
#include <drm/drm_panel.h>
#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
#include <linux/irq.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
@@ -24,6 +27,7 @@
#include <video/mipi_display.h>
#include <video/videomode.h>
+#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
/* returns true iff both arguments logically differ */
@@ -54,9 +58,12 @@
/* FIFO memory AC characteristic register */
#define DSIM_PLLCTRL_REG 0x4c /* PLL control register */
-#define DSIM_PLLTMR_REG 0x50 /* PLL timer register */
#define DSIM_PHYACCHR_REG 0x54 /* D-PHY AC characteristic register */
#define DSIM_PHYACCHR1_REG 0x58 /* D-PHY AC characteristic register1 */
+#define DSIM_PHYCTRL_REG 0x5c
+#define DSIM_PHYTIMING_REG 0x64
+#define DSIM_PHYTIMING1_REG 0x68
+#define DSIM_PHYTIMING2_REG 0x6c
/* DSIM_STATUS */
#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
@@ -200,6 +207,24 @@
#define DSIM_PLL_M(x) ((x) << 4)
#define DSIM_PLL_S(x) ((x) << 1)
+/* DSIM_PHYCTRL */
+#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
+
+/* DSIM_PHYTIMING */
+#define DSIM_PHYTIMING_LPX(x) ((x) << 8)
+#define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0)
+
+/* DSIM_PHYTIMING1 */
+#define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24)
+#define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16)
+#define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8)
+#define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0)
+
+/* DSIM_PHYTIMING2 */
+#define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16)
+#define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8)
+#define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0)
+
#define DSI_MAX_BUS_WIDTH 4
#define DSI_NUM_VIRTUAL_CHANNELS 4
#define DSI_TX_FIFO_SIZE 2048
@@ -233,6 +258,12 @@ struct exynos_dsi_transfer {
#define DSIM_STATE_INITIALIZED BIT(1)
#define DSIM_STATE_CMD_LPM BIT(2)
+struct exynos_dsi_driver_data {
+ unsigned int plltmr_reg;
+
+ unsigned int has_freqband:1;
+};
+
struct exynos_dsi {
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
@@ -247,6 +278,7 @@ struct exynos_dsi {
struct clk *bus_clk;
struct regulator_bulk_data supplies[2];
int irq;
+ int te_gpio;
u32 pll_clk_rate;
u32 burst_clk_rate;
@@ -262,11 +294,39 @@ struct exynos_dsi {
spinlock_t transfer_lock; /* protects transfer_list */
struct list_head transfer_list;
+
+ struct exynos_dsi_driver_data *driver_data;
};
#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
+static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
+ .plltmr_reg = 0x50,
+ .has_freqband = 1,
+};
+
+static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
+ .plltmr_reg = 0x58,
+};
+
+static struct of_device_id exynos_dsi_of_match[] = {
+ { .compatible = "samsung,exynos4210-mipi-dsi",
+ .data = &exynos4_dsi_driver_data },
+ { .compatible = "samsung,exynos5410-mipi-dsi",
+ .data = &exynos5_dsi_driver_data },
+ { }
+};
+
+static inline struct exynos_dsi_driver_data *exynos_dsi_get_driver_data(
+ struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(exynos_dsi_of_match, &pdev->dev);
+
+ return (struct exynos_dsi_driver_data *)of_id->data;
+}
+
static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
{
if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
@@ -340,14 +400,9 @@ static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
unsigned long freq)
{
- static const unsigned long freq_bands[] = {
- 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
- 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
- 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
- 770 * MHZ, 870 * MHZ, 950 * MHZ,
- };
+ struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
unsigned long fin, fout;
- int timeout, band;
+ int timeout;
u8 p, s;
u16 m;
u32 reg;
@@ -368,18 +423,30 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
"failed to find PLL PMS for requested frequency\n");
return -EFAULT;
}
+ dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
- for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
- if (fout < freq_bands[band])
- break;
+ writel(500, dsi->reg_base + driver_data->plltmr_reg);
+
+ reg = DSIM_PLL_EN | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
- dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d), band %d\n", fout,
- p, m, s, band);
+ if (driver_data->has_freqband) {
+ static const unsigned long freq_bands[] = {
+ 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
+ 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
+ 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
+ 770 * MHZ, 870 * MHZ, 950 * MHZ,
+ };
+ int band;
- writel(500, dsi->reg_base + DSIM_PLLTMR_REG);
+ for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
+ if (fout < freq_bands[band])
+ break;
+
+ dev_dbg(dsi->dev, "band %d\n", band);
+
+ reg |= DSIM_FREQ_BAND(band);
+ }
- reg = DSIM_FREQ_BAND(band) | DSIM_PLL_EN
- | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG);
timeout = 1000;
@@ -433,6 +500,59 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
return 0;
}
+static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
+{
+ struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+ u32 reg;
+
+ if (driver_data->has_freqband)
+ return;
+
+ /* B D-PHY: D-PHY Master & Slave Analog Block control */
+ reg = DSIM_PHYCTRL_ULPS_EXIT(0x0af);
+ writel(reg, dsi->reg_base + DSIM_PHYCTRL_REG);
+
+ /*
+ * T LPX: Transmitted length of any Low-Power state period
+ * T HS-EXIT: Time that the transmitter drives LP-11 following a HS
+ * burst
+ */
+ reg = DSIM_PHYTIMING_LPX(0x06) | DSIM_PHYTIMING_HS_EXIT(0x0b);
+ writel(reg, dsi->reg_base + DSIM_PHYTIMING_REG);
+
+ /*
+ * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
+ * Line state immediately before the HS-0 Line state starting the
+ * HS transmission
+ * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to
+ * transmitting the Clock.
+ * T CLK-POST: Time that the transmitter continues to send HS clock
+ * after the last associated Data Lane has transitioned to LP Mode
+ * Interval is defined as the period from the end of T HS-TRAIL to
+ * the beginning of T CLK-TRAIL
+ * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
+ * the last payload clock bit of a HS transmission burst
+ */
+ reg = DSIM_PHYTIMING1_CLK_PREPARE(0x07) |
+ DSIM_PHYTIMING1_CLK_ZERO(0x27) |
+ DSIM_PHYTIMING1_CLK_POST(0x0d) |
+ DSIM_PHYTIMING1_CLK_TRAIL(0x08);
+ writel(reg, dsi->reg_base + DSIM_PHYTIMING1_REG);
+
+ /*
+ * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
+ * Line state immediately before the HS-0 Line state starting the
+ * HS transmission
+ * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to
+ * transmitting the Sync sequence.
+ * T HS-TRAIL: Time that the transmitter drives the flipped differential
+ * state after last payload data bit of a HS transmission burst
+ */
+ reg = DSIM_PHYTIMING2_HS_PREPARE(0x09) | DSIM_PHYTIMING2_HS_ZERO(0x0d) |
+ DSIM_PHYTIMING2_HS_TRAIL(0x0b);
+ writel(reg, dsi->reg_base + DSIM_PHYTIMING2_REG);
+}
+
static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
{
u32 reg;
@@ -468,13 +588,20 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
/* DSI configuration */
reg = 0;
+ /*
+ * The first bit of mode_flags specifies the display configuration.
+ * If this bit is set (MIPI_DSI_MODE_VIDEO), the DSI link operates in
+ * video mode; otherwise it operates in command mode.
+ */
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
reg |= DSIM_VIDEO_MODE;
+ /*
+ * The user manual states that the following bits are ignored in
+ * command mode.
+ */
if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
reg |= DSIM_MFLUSH_VS;
- if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
- reg |= DSIM_EOT_DISABLE;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
reg |= DSIM_SYNC_INFORM;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
@@ -491,6 +618,9 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
reg |= DSIM_HSA_MODE;
}
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
+ reg |= DSIM_EOT_DISABLE;
+
switch (dsi->format) {
case MIPI_DSI_FMT_RGB888:
reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
@@ -944,17 +1074,90 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
+{
+ struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
+ struct drm_encoder *encoder = dsi->encoder;
+
+ if (dsi->state & DSIM_STATE_ENABLED)
+ exynos_drm_crtc_te_handler(encoder->crtc);
+
+ return IRQ_HANDLED;
+}
+
+static void exynos_dsi_enable_irq(struct exynos_dsi *dsi)
+{
+ enable_irq(dsi->irq);
+
+ if (gpio_is_valid(dsi->te_gpio))
+ enable_irq(gpio_to_irq(dsi->te_gpio));
+}
+
+static void exynos_dsi_disable_irq(struct exynos_dsi *dsi)
+{
+ if (gpio_is_valid(dsi->te_gpio))
+ disable_irq(gpio_to_irq(dsi->te_gpio));
+
+ disable_irq(dsi->irq);
+}
+
static int exynos_dsi_init(struct exynos_dsi *dsi)
{
- exynos_dsi_enable_clock(dsi);
exynos_dsi_reset(dsi);
- enable_irq(dsi->irq);
+ exynos_dsi_enable_irq(dsi);
+ exynos_dsi_enable_clock(dsi);
exynos_dsi_wait_for_reset(dsi);
+ exynos_dsi_set_phy_ctrl(dsi);
exynos_dsi_init_link(dsi);
return 0;
}
+static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
+{
+ int ret;
+
+ dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
+ if (!gpio_is_valid(dsi->te_gpio)) {
+ dev_err(dsi->dev, "no te-gpios specified\n");
+ ret = dsi->te_gpio;
+ goto out;
+ }
+
+ ret = gpio_request_one(dsi->te_gpio, GPIOF_IN, "te_gpio");
+ if (ret) {
+ dev_err(dsi->dev, "gpio request failed with %d\n", ret);
+ goto out;
+ }
+
+ /*
+ * This TE GPIO IRQ should not be set to IRQ_NOAUTOEN, because the panel
+ * driver calls drm_panel_init() first and then mipi_dsi_attach() in its
+ * probe(). This means te_gpio is still invalid when exynos_dsi_enable_irq()
+ * is called via drm_panel_init(), before the panel is attached.
+ */
+ ret = request_threaded_irq(gpio_to_irq(dsi->te_gpio),
+ exynos_dsi_te_irq_handler, NULL,
+ IRQF_TRIGGER_RISING, "TE", dsi);
+ if (ret) {
+ dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
+ gpio_free(dsi->te_gpio);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
+{
+ if (gpio_is_valid(dsi->te_gpio)) {
+ free_irq(gpio_to_irq(dsi->te_gpio), dsi);
+ gpio_free(dsi->te_gpio);
+ dsi->te_gpio = -ENOENT;
+ }
+}
+
static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
@@ -968,6 +1171,19 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
if (dsi->connector.dev)
drm_helper_hpd_irq_event(dsi->connector.dev);
+ /*
+ * This is a temporary solution and should be replaced by a more
+ * generic mechanism.
+ *
+ * If the attached panel device operates in command mode, the DSI host
+ * must register a TE interrupt handler.
+ */
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ int ret = exynos_dsi_register_te_irq(dsi);
+
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -976,6 +1192,8 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
{
struct exynos_dsi *dsi = host_to_dsi(host);
+ exynos_dsi_unregister_te_irq(dsi);
+
dsi->panel_node = NULL;
if (dsi->connector.dev)
@@ -1089,7 +1307,7 @@ static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
exynos_dsi_disable_clock(dsi);
- disable_irq(dsi->irq);
+ exynos_dsi_disable_irq(dsi);
}
dsi->state &= ~DSIM_STATE_CMD_LPM;
@@ -1115,7 +1333,7 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
if (ret < 0)
return ret;
- ret = drm_panel_enable(dsi->panel);
+ ret = drm_panel_prepare(dsi->panel);
if (ret < 0) {
exynos_dsi_poweroff(dsi);
return ret;
@@ -1124,6 +1342,14 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
exynos_dsi_set_display_mode(dsi);
exynos_dsi_set_display_enable(dsi, true);
+ ret = drm_panel_enable(dsi->panel);
+ if (ret < 0) {
+ exynos_dsi_set_display_enable(dsi, false);
+ drm_panel_unprepare(dsi->panel);
+ exynos_dsi_poweroff(dsi);
+ return ret;
+ }
+
dsi->state |= DSIM_STATE_ENABLED;
return 0;
@@ -1134,8 +1360,9 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
- exynos_dsi_set_display_enable(dsi, false);
drm_panel_disable(dsi->panel);
+ exynos_dsi_set_display_enable(dsi, false);
+ drm_panel_unprepare(dsi->panel);
exynos_dsi_poweroff(dsi);
dsi->state &= ~DSIM_STATE_ENABLED;
@@ -1278,6 +1505,7 @@ static struct exynos_drm_display exynos_dsi_display = {
.type = EXYNOS_DISPLAY_TYPE_LCD,
.ops = &exynos_dsi_display_ops,
};
+MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
/* of_* functions will be removed after merge of of_graph patches */
static struct device_node *
@@ -1435,6 +1663,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
goto err_del_component;
}
+ /* Mark the TE GPIO as invalid; it is looked up when the panel attaches */
+ dsi->te_gpio = -ENOENT;
+
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
INIT_LIST_HEAD(&dsi->transfer_list);
@@ -1443,6 +1674,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->dsi_host.dev = &pdev->dev;
dsi->dev = &pdev->dev;
+ dsi->driver_data = exynos_dsi_get_driver_data(pdev);
ret = exynos_dsi_parse_dt(dsi);
if (ret)
@@ -1525,11 +1757,6 @@ static int exynos_dsi_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id exynos_dsi_of_match[] = {
- { .compatible = "samsung,exynos4210-mipi-dsi" },
- { }
-};
-
struct platform_driver dsi_driver = {
.probe = exynos_dsi_probe,
.remove = exynos_dsi_remove,
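
The DSI driver now picks per-SoC parameters through the of_device_id .data pointer set up above (Exynos4 vs. Exynos5 PLL timer offset and frequency-band support). A condensed sketch of the pattern, with hypothetical names:

static const struct exynos_dsi_driver_data example_driver_data = {
	.plltmr_reg = 0x50,
	.has_freqband = 1,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-mipi-dsi",
	  .data = &example_driver_data },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_device(example_of_match, &pdev->dev);
	const struct exynos_dsi_driver_data *dd;

	if (!of_id)	/* no matching compatible string */
		return -ENODEV;

	/* .data carries the per-variant parameters for the matched SoC */
	dd = of_id->data;
	dev_dbg(&pdev->dev, "PLL timer register at 0x%x\n", dd->plltmr_reg);

	return 0;
}
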
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 831dde9034c6..ec7cc9ea50df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1887,6 +1887,7 @@ static const struct of_device_id fimc_of_match[] = {
{ .compatible = "samsung,exynos4212-fimc" },
{ },
};
+MODULE_DEVICE_TABLE(of, fimc_of_match);
struct platform_driver fimc_driver = {
.probe = fimc_probe,
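
Several drivers in this series gain a MODULE_DEVICE_TABLE(of, ...) line, as FIMC does above. In sketch form, the macro exports the match table into the module's metadata so userspace can autoload the module when a matching device tree node appears; the names below are hypothetical:

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-ip" },
	{ },
};
/* Emits "of:N*T*Cvendor,example-ip"-style aliases into modinfo; udev
 * matches them against the MODALIAS uevent and modprobes the module. */
MODULE_DEVICE_TABLE(of, example_of_match);
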
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 33161ad38201..5d09e33fef87 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -20,6 +20,8 @@
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
@@ -61,6 +63,24 @@
/* color key value register for hardware window 1 ~ 4. */
#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8))
+/* I80 / RGB trigger control register */
+#define TRIGCON 0x1A4
+#define TRGMODE_I80_RGB_ENABLE_I80 (1 << 0)
+#define SWTRGCMD_I80_RGB_ENABLE (1 << 1)
+
+/* display mode change control register except exynos4 */
+#define VIDOUT_CON 0x000
+#define VIDOUT_CON_F_I80_LDI0 (0x2 << 8)
+
+/* I80 interface control for main LDI register */
+#define I80IFCONFAx(x) (0x1B0 + (x) * 4)
+#define I80IFCONFBx(x) (0x1B8 + (x) * 4)
+#define LCD_CS_SETUP(x) ((x) << 16)
+#define LCD_WR_SETUP(x) ((x) << 12)
+#define LCD_WR_ACTIVE(x) ((x) << 8)
+#define LCD_WR_HOLD(x) ((x) << 4)
+#define I80IFEN_ENABLE (1 << 0)
+
/* FIMD has totally five hardware windows. */
#define WINDOWS_NR 5
@@ -68,10 +88,14 @@
struct fimd_driver_data {
unsigned int timing_base;
+ unsigned int lcdblk_offset;
+ unsigned int lcdblk_vt_shift;
+ unsigned int lcdblk_bypass_shift;
unsigned int has_shadowcon:1;
unsigned int has_clksel:1;
unsigned int has_limited_fmt:1;
+ unsigned int has_vidoutcon:1;
};
static struct fimd_driver_data s3c64xx_fimd_driver_data = {
@@ -82,12 +106,19 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
static struct fimd_driver_data exynos4_fimd_driver_data = {
.timing_base = 0x0,
+ .lcdblk_offset = 0x210,
+ .lcdblk_vt_shift = 10,
+ .lcdblk_bypass_shift = 1,
.has_shadowcon = 1,
};
static struct fimd_driver_data exynos5_fimd_driver_data = {
.timing_base = 0x20000,
+ .lcdblk_offset = 0x214,
+ .lcdblk_vt_shift = 24,
+ .lcdblk_bypass_shift = 15,
.has_shadowcon = 1,
+ .has_vidoutcon = 1,
};
struct fimd_win_data {
@@ -112,15 +143,22 @@ struct fimd_context {
struct clk *bus_clk;
struct clk *lcd_clk;
void __iomem *regs;
+ struct regmap *sysreg;
struct drm_display_mode mode;
struct fimd_win_data win_data[WINDOWS_NR];
unsigned int default_win;
unsigned long irq_flags;
+ u32 vidcon0;
u32 vidcon1;
+ u32 vidout_con;
+ u32 i80ifcon;
+ bool i80_if;
bool suspended;
int pipe;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
+ atomic_t win_updated;
+ atomic_t triggering;
struct exynos_drm_panel_info panel;
struct fimd_driver_data *driver_data;
@@ -136,6 +174,7 @@ static const struct of_device_id fimd_driver_dt_match[] = {
.data = &exynos5_fimd_driver_data },
{},
};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
struct platform_device *pdev)
@@ -243,6 +282,14 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
u32 clkdiv;
+ if (ctx->i80_if) {
+ /*
+ * The frame done interrupt should occur before the
+ * next TE signal.
+ */
+ ideal_clk *= 2;
+ }
+
/* Find the clock divider value that gets us closest to ideal_clk */
clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk);
@@ -271,11 +318,10 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
{
struct fimd_context *ctx = mgr->ctx;
struct drm_display_mode *mode = &ctx->mode;
- struct fimd_driver_data *driver_data;
- u32 val, clkdiv, vidcon1;
- int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
+ struct fimd_driver_data *driver_data = ctx->driver_data;
+ void *timing_base = ctx->regs + driver_data->timing_base;
+ u32 val, clkdiv;
- driver_data = ctx->driver_data;
if (ctx->suspended)
return;
@@ -283,33 +329,65 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
if (mode->htotal == 0 || mode->vtotal == 0)
return;
- /* setup polarity values */
- vidcon1 = ctx->vidcon1;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- vidcon1 |= VIDCON1_INV_VSYNC;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- vidcon1 |= VIDCON1_INV_HSYNC;
- writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
-
- /* setup vertical timing values. */
- vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
- vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
- vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
-
- val = VIDTCON0_VBPD(vbpd - 1) |
- VIDTCON0_VFPD(vfpd - 1) |
- VIDTCON0_VSPW(vsync_len - 1);
- writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
-
- /* setup horizontal timing values. */
- hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
- hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
- hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
-
- val = VIDTCON1_HBPD(hbpd - 1) |
- VIDTCON1_HFPD(hfpd - 1) |
- VIDTCON1_HSPW(hsync_len - 1);
- writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
+ if (ctx->i80_if) {
+ val = ctx->i80ifcon | I80IFEN_ENABLE;
+ writel(val, timing_base + I80IFCONFAx(0));
+
+ /* disable auto frame rate */
+ writel(0, timing_base + I80IFCONFBx(0));
+
+ /* set video type selection to I80 interface */
+ if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
+ driver_data->lcdblk_offset,
+ 0x3 << driver_data->lcdblk_vt_shift,
+ 0x1 << driver_data->lcdblk_vt_shift)) {
+ DRM_ERROR("Failed to update sysreg for I80 i/f.\n");
+ return;
+ }
+ } else {
+ int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
+ u32 vidcon1;
+
+ /* setup polarity values */
+ vidcon1 = ctx->vidcon1;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ vidcon1 |= VIDCON1_INV_VSYNC;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ vidcon1 |= VIDCON1_INV_HSYNC;
+ writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
+
+ /* setup vertical timing values. */
+ vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
+ vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
+
+ val = VIDTCON0_VBPD(vbpd - 1) |
+ VIDTCON0_VFPD(vfpd - 1) |
+ VIDTCON0_VSPW(vsync_len - 1);
+ writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
+
+ /* setup horizontal timing values. */
+ hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
+ hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
+
+ val = VIDTCON1_HBPD(hbpd - 1) |
+ VIDTCON1_HFPD(hfpd - 1) |
+ VIDTCON1_HSPW(hsync_len - 1);
+ writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
+ }
+
+ if (driver_data->has_vidoutcon)
+ writel(ctx->vidout_con, timing_base + VIDOUT_CON);
+
+ /* set bypass selection */
+ if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
+ driver_data->lcdblk_offset,
+ 0x1 << driver_data->lcdblk_bypass_shift,
+ 0x1 << driver_data->lcdblk_bypass_shift)) {
+ DRM_ERROR("Failed to update sysreg for bypass setting.\n");
+ return;
+ }
/* setup horizontal and vertical display size. */
val = VIDTCON2_LINEVAL(mode->vdisplay - 1) |
@@ -322,7 +400,8 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
* fields of register with prefix '_F' would be updated
* at vsync(same as dma start)
*/
- val = VIDCON0_ENVID | VIDCON0_ENVID_F;
+ val = ctx->vidcon0;
+ val |= VIDCON0_ENVID | VIDCON0_ENVID_F;
if (ctx->driver_data->has_clksel)
val |= VIDCON0_CLKSEL_LCD;
@@ -660,6 +739,9 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
}
win_data->enabled = true;
+
+ if (ctx->i80_if)
+ atomic_set(&ctx->win_updated, 1);
}
static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
@@ -838,6 +920,58 @@ static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
}
}
+static void fimd_trigger(struct device *dev)
+{
+ struct exynos_drm_manager *mgr = get_fimd_manager(dev);
+ struct fimd_context *ctx = mgr->ctx;
+ struct fimd_driver_data *driver_data = ctx->driver_data;
+ void *timing_base = ctx->regs + driver_data->timing_base;
+ u32 reg;
+
+ atomic_set(&ctx->triggering, 1);
+
+ reg = readl(ctx->regs + VIDINTCON0);
+ reg |= (VIDINTCON0_INT_ENABLE | VIDINTCON0_INT_I80IFDONE |
+ VIDINTCON0_INT_SYSMAINCON);
+ writel(reg, ctx->regs + VIDINTCON0);
+
+ reg = readl(timing_base + TRIGCON);
+ reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE);
+ writel(reg, timing_base + TRIGCON);
+}
+
+static void fimd_te_handler(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+
+ /* Check whether the CRTC is already detached from the encoder */
+ if (ctx->pipe < 0 || !ctx->drm_dev)
+ return;
+
+ /*
+ * Skip triggering if we are already in the triggering state, because
+ * multiple trigger requests can cause a panel reset.
+ */
+ if (atomic_read(&ctx->triggering))
+ return;
+
+ /*
+ * If there is a page flip request, trigger the transfer and handle the
+ * page flip event so that the current fb can be written into panel GRAM.
+ */
+ if (atomic_add_unless(&ctx->win_updated, -1, 0))
+ fimd_trigger(ctx->dev);
+
+ /* Wake up the vsync event queue */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ wake_up(&ctx->wait_vsync_queue);
+
+ if (!atomic_read(&ctx->triggering))
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ }
+}
+
static struct exynos_drm_manager_ops fimd_manager_ops = {
.dpms = fimd_dpms,
.mode_fixup = fimd_mode_fixup,
@@ -849,6 +983,7 @@ static struct exynos_drm_manager_ops fimd_manager_ops = {
.win_mode_set = fimd_win_mode_set,
.win_commit = fimd_win_commit,
.win_disable = fimd_win_disable,
+ .te_handler = fimd_te_handler,
};
static struct exynos_drm_manager fimd_manager = {
@@ -859,26 +994,40 @@ static struct exynos_drm_manager fimd_manager = {
static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
{
struct fimd_context *ctx = (struct fimd_context *)dev_id;
- u32 val;
+ u32 val, clear_bit;
val = readl(ctx->regs + VIDINTCON1);
- if (val & VIDINTCON1_INT_FRAME)
- /* VSYNC interrupt */
- writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+ clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME;
+ if (val & clear_bit)
+ writel(clear_bit, ctx->regs + VIDINTCON1);
/* check whether the crtc is already detached from the encoder */
if (ctx->pipe < 0 || !ctx->drm_dev)
goto out;
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ if (ctx->i80_if) {
+ /* unset I80 frame done interrupt */
+ val = readl(ctx->regs + VIDINTCON0);
+ val &= ~(VIDINTCON0_INT_I80IFDONE | VIDINTCON0_INT_SYSMAINCON);
+ writel(val, ctx->regs + VIDINTCON0);
- /* set wait vsync event to zero and wake up queue. */
- if (atomic_read(&ctx->wait_vsync_event)) {
- atomic_set(&ctx->wait_vsync_event, 0);
- wake_up(&ctx->wait_vsync_queue);
+ /* exit triggering mode */
+ atomic_set(&ctx->triggering, 0);
+
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ } else {
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ wake_up(&ctx->wait_vsync_queue);
+ }
}
+
out:
return IRQ_HANDLED;
}
@@ -923,6 +1072,7 @@ static int fimd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
+ struct device_node *i80_if_timings;
struct resource *res;
int ret = -EINVAL;
@@ -944,12 +1094,51 @@ static int fimd_probe(struct platform_device *pdev)
ctx->dev = dev;
ctx->suspended = true;
+ ctx->driver_data = drm_fimd_get_driver_data(pdev);
if (of_property_read_bool(dev->of_node, "samsung,invert-vden"))
ctx->vidcon1 |= VIDCON1_INV_VDEN;
if (of_property_read_bool(dev->of_node, "samsung,invert-vclk"))
ctx->vidcon1 |= VIDCON1_INV_VCLK;
+ i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
+ if (i80_if_timings) {
+ u32 val;
+
+ ctx->i80_if = true;
+
+ if (ctx->driver_data->has_vidoutcon)
+ ctx->vidout_con |= VIDOUT_CON_F_I80_LDI0;
+ else
+ ctx->vidcon0 |= VIDCON0_VIDOUT_I80_LDI0;
+ /*
+ * The user manual describes that this "DSI_EN" bit is required
+ * to enable I80 24-bit data interface.
+ */
+ ctx->vidcon0 |= VIDCON0_DSI_EN;
+
+ if (of_property_read_u32(i80_if_timings, "cs-setup", &val))
+ val = 0;
+ ctx->i80ifcon = LCD_CS_SETUP(val);
+ if (of_property_read_u32(i80_if_timings, "wr-setup", &val))
+ val = 0;
+ ctx->i80ifcon |= LCD_WR_SETUP(val);
+ if (of_property_read_u32(i80_if_timings, "wr-active", &val))
+ val = 1;
+ ctx->i80ifcon |= LCD_WR_ACTIVE(val);
+ if (of_property_read_u32(i80_if_timings, "wr-hold", &val))
+ val = 0;
+ ctx->i80ifcon |= LCD_WR_HOLD(val);
+ }
+ of_node_put(i80_if_timings);
+
+ ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,sysreg");
+ if (IS_ERR(ctx->sysreg)) {
+ dev_warn(dev, "failed to get system register.\n");
+ ctx->sysreg = NULL;
+ }
+
ctx->bus_clk = devm_clk_get(dev, "fimd");
if (IS_ERR(ctx->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
@@ -972,7 +1161,8 @@ static int fimd_probe(struct platform_device *pdev)
goto err_del_component;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ ctx->i80_if ? "lcd_sys" : "vsync");
if (!res) {
dev_err(dev, "irq request failed.\n");
ret = -ENXIO;
@@ -986,7 +1176,6 @@ static int fimd_probe(struct platform_device *pdev)
goto err_del_component;
}
- ctx->driver_data = drm_fimd_get_driver_data(pdev);
init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 800158714473..df7a77d3eff8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1042,8 +1042,23 @@ err:
int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct device *dev;
+ struct g2d_data *g2d;
struct drm_exynos_g2d_get_ver *ver = data;
+ if (!g2d_priv)
+ return -ENODEV;
+
+ dev = g2d_priv->dev;
+ if (!dev)
+ return -ENODEV;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
ver->major = G2D_HW_MAJOR_VER;
ver->minor = G2D_HW_MINOR_VER;
@@ -1056,7 +1071,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev = g2d_priv->dev;
+ struct device *dev;
struct g2d_data *g2d;
struct drm_exynos_g2d_set_cmdlist *req = data;
struct drm_exynos_g2d_cmd *cmd;
@@ -1067,6 +1082,10 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
int size;
int ret;
+ if (!g2d_priv)
+ return -ENODEV;
+
+ dev = g2d_priv->dev;
if (!dev)
return -ENODEV;
@@ -1223,13 +1242,17 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev = g2d_priv->dev;
+ struct device *dev;
struct g2d_data *g2d;
struct drm_exynos_g2d_exec *req = data;
struct g2d_runqueue_node *runqueue_node;
struct list_head *run_cmdlist;
struct list_head *event_list;
+ if (!g2d_priv)
+ return -ENODEV;
+
+ dev = g2d_priv->dev;
if (!dev)
return -ENODEV;
@@ -1544,8 +1567,10 @@ static const struct dev_pm_ops g2d_pm_ops = {
static const struct of_device_id exynos_g2d_match[] = {
{ .compatible = "samsung,exynos5250-g2d" },
+ { .compatible = "samsung,exynos4212-g2d" },
{},
};
+MODULE_DEVICE_TABLE(of, exynos_g2d_match);
struct platform_driver g2d_driver = {
.probe = g2d_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 163a054922cb..15db80138382 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -301,7 +301,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *filp)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, filp, gem_handle);
@@ -310,8 +309,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
return;
}
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
drm_gem_object_unreference_unlocked(obj);
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index a1888e128f1d..c411399070d6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -129,9 +129,6 @@ void exynos_platform_device_ipp_unregister(void)
int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
- if (!ippdrv)
- return -EINVAL;
-
mutex_lock(&exynos_drm_ippdrv_lock);
list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
mutex_unlock(&exynos_drm_ippdrv_lock);
@@ -141,9 +138,6 @@ int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
- if (!ippdrv)
- return -EINVAL;
-
mutex_lock(&exynos_drm_ippdrv_lock);
list_del(&ippdrv->drv_list);
mutex_unlock(&exynos_drm_ippdrv_lock);
@@ -151,20 +145,15 @@ int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
return 0;
}
-static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
- u32 *idp)
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
{
int ret;
- /* do the allocation under our mutexlock */
mutex_lock(lock);
ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
mutex_unlock(lock);
- if (ret < 0)
- return ret;
- *idp = ret;
- return 0;
+ return ret;
}
static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
@@ -178,35 +167,25 @@ static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
void *obj;
- DRM_DEBUG_KMS("id[%d]\n", id);
-
mutex_lock(lock);
-
- /* find object using handle */
obj = idr_find(id_idr, id);
- if (!obj) {
- DRM_ERROR("failed to find object.\n");
- mutex_unlock(lock);
- return ERR_PTR(-ENODEV);
- }
-
mutex_unlock(lock);
return obj;
}
-static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
- enum drm_exynos_ipp_cmd cmd)
+static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_property *property)
{
- /*
- * check dedicated flag and WB, OUTPUT operation with
- * power on state.
- */
- if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
- !pm_runtime_suspended(ippdrv->dev)))
- return true;
+ if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
+ !pm_runtime_suspended(ippdrv->dev)))
+ return -EBUSY;
- return false;
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property))
+ return -EINVAL;
+
+ return 0;
}
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
@@ -214,62 +193,30 @@ static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
{
struct exynos_drm_ippdrv *ippdrv;
u32 ipp_id = property->ipp_id;
-
- DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);
+ int ret;
if (ipp_id) {
- /* find ipp driver using idr */
- ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
- ipp_id);
- if (IS_ERR(ippdrv)) {
- DRM_ERROR("not found ipp%d driver.\n", ipp_id);
- return ippdrv;
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
+ if (!ippdrv) {
+ DRM_DEBUG("ipp%d driver not found\n", ipp_id);
+ return ERR_PTR(-ENODEV);
}
- /*
- * WB, OUTPUT opertion not supported multi-operation.
- * so, make dedicated state at set property ioctl.
- * when ipp driver finished operations, clear dedicated flags.
- */
- if (ipp_check_dedicated(ippdrv, property->cmd)) {
- DRM_ERROR("already used choose device.\n");
- return ERR_PTR(-EBUSY);
- }
-
- /*
- * This is necessary to find correct device in ipp drivers.
- * ipp drivers have different abilities,
- * so need to check property.
- */
- if (ippdrv->check_property &&
- ippdrv->check_property(ippdrv->dev, property)) {
- DRM_ERROR("not support property.\n");
- return ERR_PTR(-EINVAL);
+ ret = ipp_check_driver(ippdrv, property);
+ if (ret < 0) {
+ DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
+ return ERR_PTR(ret);
}
return ippdrv;
} else {
- /*
- * This case is search all ipp driver for finding.
- * user application don't set ipp_id in this case,
- * so ipp subsystem search correct driver in driver list.
- */
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- if (ipp_check_dedicated(ippdrv, property->cmd)) {
- DRM_DEBUG_KMS("used device.\n");
- continue;
- }
-
- if (ippdrv->check_property &&
- ippdrv->check_property(ippdrv->dev, property)) {
- DRM_DEBUG_KMS("not support property.\n");
- continue;
- }
-
- return ippdrv;
+ ret = ipp_check_driver(ippdrv, property);
+ if (ret == 0)
+ return ippdrv;
}
- DRM_ERROR("not support ipp driver operations.\n");
+ DRM_DEBUG("cannot find driver suitable for given property.\n");
}
return ERR_PTR(-ENODEV);
@@ -308,8 +255,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
- struct device *dev = priv->dev;
+ struct device *dev = file_priv->ipp_dev;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_prop_list *prop_list = data;
struct exynos_drm_ippdrv *ippdrv;
@@ -346,10 +292,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
*/
ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
prop_list->ipp_id);
- if (IS_ERR(ippdrv)) {
+ if (!ippdrv) {
DRM_ERROR("not found ipp%d driver.\n",
prop_list->ipp_id);
- return PTR_ERR(ippdrv);
+ return -ENODEV;
}
*prop_list = ippdrv->prop_list;
@@ -432,7 +378,7 @@ static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
if (!event_work)
return ERR_PTR(-ENOMEM);
- INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+ INIT_WORK(&event_work->work, ipp_sched_event);
return event_work;
}
@@ -441,8 +387,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
- struct device *dev = priv->dev;
+ struct device *dev = file_priv->ipp_dev;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_property *property = data;
struct exynos_drm_ippdrv *ippdrv;
@@ -489,19 +434,18 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
if (!c_node)
return -ENOMEM;
- /* create property id */
- ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
- &property->prop_id);
- if (ret) {
+ ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
+ if (ret < 0) {
DRM_ERROR("failed to create id.\n");
goto err_clear;
}
+ property->prop_id = ret;
DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
property->prop_id, property->cmd, (int)ippdrv);
/* stored property information and ippdrv in private data */
- c_node->priv = priv;
+ c_node->dev = dev;
c_node->property = *property;
c_node->state = IPP_STATE_IDLE;
@@ -534,7 +478,6 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
INIT_LIST_HEAD(&c_node->mem_list[i]);
INIT_LIST_HEAD(&c_node->event_list);
- list_splice_init(&priv->event_list, &c_node->event_list);
mutex_lock(&ippdrv->cmd_lock);
list_add_tail(&c_node->list, &ippdrv->cmd_list);
mutex_unlock(&ippdrv->cmd_lock);
@@ -577,42 +520,18 @@ static void ipp_clean_cmd_node(struct ipp_context *ctx,
kfree(c_node);
}
-static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
- struct drm_exynos_ipp_property *property = &c_node->property;
- struct drm_exynos_ipp_mem_node *m_node;
- struct list_head *head;
- int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
-
- for_each_ipp_ops(i) {
- /* source/destination memory list */
- head = &c_node->mem_list[i];
-
- /* find memory node entry */
- list_for_each_entry(m_node, head, list) {
- DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
- i ? "dst" : "src", count[i], (int)m_node);
- count[i]++;
- }
+ switch (c_node->property.cmd) {
+ case IPP_CMD_WB:
+ return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
+ case IPP_CMD_OUTPUT:
+ return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
+ case IPP_CMD_M2M:
+ default:
+ return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
+ !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
}
-
- DRM_DEBUG_KMS("min[%d]max[%d]\n",
- min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
- max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
-
- /*
- * M2M operations should be need paired memory address.
- * so, need to check minimum count about src, dst.
- * other case not use paired memory, so use maximum count
- */
- if (ipp_is_m2m_cmd(property->cmd))
- ret = min(count[EXYNOS_DRM_OPS_SRC],
- count[EXYNOS_DRM_OPS_DST]);
- else
- ret = max(count[EXYNOS_DRM_OPS_SRC],
- count[EXYNOS_DRM_OPS_DST]);
-
- return ret;
}
static struct drm_exynos_ipp_mem_node
@@ -683,16 +602,14 @@ static struct drm_exynos_ipp_mem_node
struct drm_exynos_ipp_queue_buf *qbuf)
{
struct drm_exynos_ipp_mem_node *m_node;
- struct drm_exynos_ipp_buf_info buf_info;
- void *addr;
+ struct drm_exynos_ipp_buf_info *buf_info;
int i;
m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
if (!m_node)
return ERR_PTR(-ENOMEM);
- /* clear base address for error handling */
- memset(&buf_info, 0x0, sizeof(buf_info));
+ buf_info = &m_node->buf_info;
/* operations, buffer id */
m_node->ops_id = qbuf->ops_id;
@@ -707,6 +624,8 @@ static struct drm_exynos_ipp_mem_node
/* get dma address by handle */
if (qbuf->handle[i]) {
+ dma_addr_t *addr;
+
addr = exynos_drm_gem_get_dma_addr(drm_dev,
qbuf->handle[i], file);
if (IS_ERR(addr)) {
@@ -714,15 +633,14 @@ static struct drm_exynos_ipp_mem_node
goto err_clear;
}
- buf_info.handles[i] = qbuf->handle[i];
- buf_info.base[i] = *(dma_addr_t *) addr;
- DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n",
- i, buf_info.base[i], (int)buf_info.handles[i]);
+ buf_info->handles[i] = qbuf->handle[i];
+ buf_info->base[i] = *addr;
+ DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
+ buf_info->base[i], buf_info->handles[i]);
}
}
m_node->filp = file;
- m_node->buf_info = buf_info;
mutex_lock(&c_node->mem_lock);
list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
mutex_unlock(&c_node->mem_lock);
@@ -930,8 +848,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
- struct device *dev = priv->dev;
+ struct device *dev = file_priv->ipp_dev;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_queue_buf *qbuf = data;
struct drm_exynos_ipp_cmd_node *c_node;
@@ -955,9 +872,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
/* find command node */
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
qbuf->prop_id);
- if (IS_ERR(c_node)) {
+ if (!c_node) {
DRM_ERROR("failed to get command node.\n");
- return PTR_ERR(c_node);
+ return -ENODEV;
}
/* buffer control */
@@ -1062,9 +979,8 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
struct exynos_drm_ippdrv *ippdrv = NULL;
- struct device *dev = priv->dev;
+ struct device *dev = file_priv->ipp_dev;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
struct drm_exynos_ipp_cmd_work *cmd_work;
@@ -1091,9 +1007,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
cmd_ctrl->prop_id);
- if (IS_ERR(c_node)) {
+ if (!c_node) {
DRM_ERROR("invalid command node list.\n");
- return PTR_ERR(c_node);
+ return -ENODEV;
}
if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
@@ -1198,7 +1114,6 @@ static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
/* reset h/w block */
if (ippdrv->reset &&
ippdrv->reset(ippdrv->dev)) {
- DRM_ERROR("failed to reset.\n");
return -EINVAL;
}
@@ -1216,30 +1131,24 @@ static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
/* set format */
if (ops->set_fmt) {
ret = ops->set_fmt(ippdrv->dev, config->fmt);
- if (ret) {
- DRM_ERROR("not support format.\n");
+ if (ret)
return ret;
- }
}
/* set transform for rotation, flip */
if (ops->set_transf) {
ret = ops->set_transf(ippdrv->dev, config->degree,
config->flip, &swap);
- if (ret) {
- DRM_ERROR("not support tranf.\n");
- return -EINVAL;
- }
+ if (ret)
+ return ret;
}
/* set size */
if (ops->set_size) {
ret = ops->set_size(ippdrv->dev, swap, &config->pos,
&config->sz);
- if (ret) {
- DRM_ERROR("not support size.\n");
+ if (ret)
return ret;
- }
}
}
@@ -1283,11 +1192,6 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
m_node = list_first_entry(head,
struct drm_exynos_ipp_mem_node, list);
- if (!m_node) {
- DRM_ERROR("failed to get node.\n");
- ret = -EFAULT;
- goto err_unlock;
- }
DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
@@ -1545,11 +1449,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
m_node = list_first_entry(head,
struct drm_exynos_ipp_mem_node, list);
- if (!m_node) {
- DRM_ERROR("empty memory node.\n");
- ret = -ENOMEM;
- goto err_mem_unlock;
- }
tbuf_id[i] = m_node->buf_id;
DRM_DEBUG_KMS("%s buf_id[%d]\n",
@@ -1586,11 +1485,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
m_node = list_first_entry(head,
struct drm_exynos_ipp_mem_node, list);
- if (!m_node) {
- DRM_ERROR("empty memory node.\n");
- ret = -ENOMEM;
- goto err_mem_unlock;
- }
tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
@@ -1704,21 +1598,17 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
/* get ipp driver entry */
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- u32 ipp_id;
-
ippdrv->drm_dev = drm_dev;
- ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
- &ipp_id);
- if (ret || ipp_id == 0) {
+ ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
+ if (ret < 0) {
DRM_ERROR("failed to create id.\n");
goto err;
}
+ ippdrv->prop_list.ipp_id = ret;
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
- count++, (int)ippdrv, ipp_id);
-
- ippdrv->prop_list.ipp_id = ipp_id;
+ count++, (int)ippdrv, ret);
/* store parent device for node */
ippdrv->parent_dev = dev;
@@ -1776,17 +1666,10 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- priv->dev = dev;
- file_priv->ipp_priv = priv;
- INIT_LIST_HEAD(&priv->event_list);
+ file_priv->ipp_dev = dev;
- DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv);
+ DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);
return 0;
}
@@ -1795,13 +1678,12 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
struct exynos_drm_ippdrv *ippdrv = NULL;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
int count = 0;
- DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);
+ DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
mutex_lock(&ippdrv->cmd_lock);
@@ -1810,7 +1692,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
count++, (int)ippdrv);
- if (c_node->priv == priv) {
+ if (c_node->dev == file_priv->ipp_dev) {
/*
* userspace went into an abnormal state: the process was
* killed and the file is being closed.
@@ -1832,7 +1714,6 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
mutex_unlock(&ippdrv->cmd_lock);
}
- kfree(priv);
return;
}
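
The ipp changes above switch to the plain idr idiom: ipp_create_id() now hands back the freshly allocated id as its return value (negative errno on failure), and ipp_find_obj() returns NULL rather than an ERR_PTR when the id is unknown, which is why the callers now test !c_node and return -ENODEV. A minimal sketch of that pattern, assuming only the standard <linux/idr.h> API (names here are illustrative, not the driver's):

    #include <linux/gfp.h>
    #include <linux/idr.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    /* Allocate an id for obj; returns the new id (>= 1) or a negative errno. */
    static int example_create_id(struct idr *idr, struct mutex *lock, void *obj)
    {
    	int id;

    	mutex_lock(lock);
    	/* start at 1 so that 0 is never a valid id */
    	id = idr_alloc(idr, obj, 1, 0, GFP_KERNEL);
    	mutex_unlock(lock);

    	return id;
    }

    /* Look up an object by id; returns NULL when the id is not in use. */
    static void *example_find_obj(struct idr *idr, struct mutex *lock, u32 id)
    {
    	void *obj;

    	mutex_lock(lock);
    	obj = idr_find(idr, id);
    	mutex_unlock(lock);

    	return obj;
    }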
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 7aaeaae757c2..6f48d62aeb30 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -48,7 +48,7 @@ struct drm_exynos_ipp_cmd_work {
/*
* A structure of command node.
*
- * @priv: IPP private information.
+ * @dev: IPP device.
* @list: list head to command queue information.
* @event_list: list head of event.
* @mem_list: list head to source,destination memory queue information.
@@ -64,7 +64,7 @@ struct drm_exynos_ipp_cmd_work {
* @state: state of command node.
*/
struct drm_exynos_ipp_cmd_node {
- struct exynos_drm_ipp_private *priv;
+ struct device *dev;
struct list_head list;
struct list_head event_list;
struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
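
The header hunk above also keeps the kernel-doc block in step with the structure: once the member is renamed from priv to dev, the matching @priv line has to become @dev. A minimal sketch of the kernel-doc convention being followed (types hypothetical):

    #include <linux/device.h>
    #include <linux/list.h>

    /**
     * struct example_cmd_node - one queued command
     * @dev: device this node was created for
     * @list: link into the owning driver's command queue
     *
     * Every @field line must name a real member, or scripts/kernel-doc
     * warns when the documentation is generated.
     */
    struct example_cmd_node {
    	struct device *dev;
    	struct list_head list;
    };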
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index f01fbb6dc1f0..55af6b41c1df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -691,6 +691,7 @@ static const struct of_device_id exynos_rotator_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, exynos_rotator_match);
static int rotator_probe(struct platform_device *pdev)
{
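
The MODULE_DEVICE_TABLE(of, ...) line added above is what lets the rotator module autoload: it exports the OF match table as module alias metadata, so modprobe can find the module when a matching device-tree node shows up. A minimal sketch of the idiom (compatible string hypothetical):

    #include <linux/module.h>
    #include <linux/of.h>

    static const struct of_device_id example_match[] = {
    	{ .compatible = "vendor,example-ip" },	/* hypothetical */
    	{ /* sentinel */ },
    };
    MODULE_DEVICE_TABLE(of, example_match);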
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 81df11d57673..562966db2aa1 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -84,6 +84,7 @@ struct hdmi_resources {
struct clk *sclk_hdmiphy;
struct clk *mout_hdmi;
struct regulator_bulk_data *regul_bulk;
+ struct regulator *reg_hdmi_en;
int regul_count;
};
@@ -592,6 +593,13 @@ static struct hdmi_driver_data exynos4212_hdmi_driver_data = {
.is_apb_phy = 0,
};
+static struct hdmi_driver_data exynos4210_hdmi_driver_data = {
+ .type = HDMI_TYPE13,
+ .phy_confs = hdmiphy_v13_configs,
+ .phy_conf_count = ARRAY_SIZE(hdmiphy_v13_configs),
+ .is_apb_phy = 0,
+};
+
static struct hdmi_driver_data exynos5_hdmi_driver_data = {
.type = HDMI_TYPE14,
.phy_confs = hdmiphy_v13_configs,
@@ -1241,14 +1249,13 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
static void hdmi_audio_init(struct hdmi_context *hdata)
{
- u32 sample_rate, bits_per_sample, frame_size_code;
+ u32 sample_rate, bits_per_sample;
u32 data_num, bit_ch, sample_frq;
u32 val;
u8 acr[7];
sample_rate = 44100;
bits_per_sample = 16;
- frame_size_code = 0;
switch (bits_per_sample) {
case 20:
@@ -2168,7 +2175,6 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
struct device *dev = hdata->dev;
struct hdmi_resources *res = &hdata->res;
static char *supply[] = {
- "hdmi-en",
"vdd",
"vdd_osc",
"vdd_pll",
@@ -2228,6 +2234,20 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
}
res->regul_count = ARRAY_SIZE(supply);
+ res->reg_hdmi_en = devm_regulator_get(dev, "hdmi-en");
+ if (IS_ERR(res->reg_hdmi_en) && PTR_ERR(res->reg_hdmi_en) != -ENOENT) {
+ DRM_ERROR("failed to get hdmi-en regulator\n");
+ return PTR_ERR(res->reg_hdmi_en);
+ }
+ if (!IS_ERR(res->reg_hdmi_en)) {
+ ret = regulator_enable(res->reg_hdmi_en);
+ if (ret) {
+ DRM_ERROR("failed to enable hdmi-en regulator\n");
+ return ret;
+ }
+ } else {
+ res->reg_hdmi_en = NULL;
+ }
+
return ret;
fail:
DRM_ERROR("HDMI resource init - failed\n");
@@ -2263,6 +2283,9 @@ static struct of_device_id hdmi_match_types[] = {
.compatible = "samsung,exynos5-hdmi",
.data = &exynos5_hdmi_driver_data,
}, {
+ .compatible = "samsung,exynos4210-hdmi",
+ .data = &exynos4210_hdmi_driver_data,
+ }, {
.compatible = "samsung,exynos4212-hdmi",
.data = &exynos4212_hdmi_driver_data,
}, {
@@ -2272,6 +2295,7 @@ static struct of_device_id hdmi_match_types[] = {
/* end node */
}
};
+MODULE_DEVICE_TABLE(of, hdmi_match_types);
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
@@ -2494,7 +2518,11 @@ static int hdmi_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&hdata->hotplug_work);
- put_device(&hdata->hdmiphy_port->dev);
+ if (hdata->res.reg_hdmi_en)
+ regulator_disable(hdata->res.reg_hdmi_en);
+
+ if (hdata->hdmiphy_port)
+ put_device(&hdata->hdmiphy_port->dev);
put_device(&hdata->ddc_adpt->dev);
pm_runtime_disable(&pdev->dev);
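
The hdmi-en handling above is the optional-supply pattern: -ENOENT from devm_regulator_get() is treated as "supply not wired up" and the pointer is cleared to NULL, while any other error aborts the probe; hdmi_remove() then only disables the regulator if it was actually obtained. A minimal sketch of that pattern (helper name hypothetical, assuming the -ENOENT convention used here):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    /* Fetch an optional supply: on success *out is the regulator, or NULL
     * when the supply simply is not described for this device. */
    static int example_get_optional_supply(struct device *dev, const char *id,
    				       struct regulator **out)
    {
    	struct regulator *reg = devm_regulator_get(dev, id);

    	if (IS_ERR(reg)) {
    		if (PTR_ERR(reg) != -ENOENT)
    			return PTR_ERR(reg);	/* a real failure */
    		reg = NULL;			/* absent, not an error */
    	}

    	*out = reg;
    	return 0;
    }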
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 7529946d0a74..e8b4ec84b312 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -76,7 +76,7 @@ struct mixer_resources {
struct clk *vp;
struct clk *sclk_mixer;
struct clk *sclk_hdmi;
- struct clk *sclk_dac;
+ struct clk *mout_mixer;
};
enum mixer_version_id {
@@ -93,6 +93,7 @@ struct mixer_context {
bool interlace;
bool powered;
bool vp_enabled;
+ bool has_sclk;
u32 int_en;
struct mutex mixer_mutex;
@@ -106,6 +107,7 @@ struct mixer_context {
struct mixer_drv_data {
enum mixer_version_id version;
bool is_vp_enabled;
+ bool has_sclk;
};
static const u8 filter_y_horiz_tap8[] = {
@@ -363,6 +365,11 @@ static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
mixer_reg_writemask(res, MXR_CFG, val,
MXR_CFG_VP_ENABLE);
+
+ /* control blending of graphic layer 0 */
+ mixer_reg_writemask(res, MXR_GRAPHIC_CFG(0), val,
+ MXR_GRP_CFG_BLEND_PRE_MUL |
+ MXR_GRP_CFG_PIXEL_BLEND_EN);
}
break;
}
@@ -809,19 +816,23 @@ static int vp_resources_init(struct mixer_context *mixer_ctx)
dev_err(dev, "failed to get clock 'vp'\n");
return -ENODEV;
}
- mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
- if (IS_ERR(mixer_res->sclk_mixer)) {
- dev_err(dev, "failed to get clock 'sclk_mixer'\n");
- return -ENODEV;
- }
- mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
- if (IS_ERR(mixer_res->sclk_dac)) {
- dev_err(dev, "failed to get clock 'sclk_dac'\n");
- return -ENODEV;
- }
- if (mixer_res->sclk_hdmi)
- clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
+ if (mixer_ctx->has_sclk) {
+ mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
+ if (IS_ERR(mixer_res->sclk_mixer)) {
+ dev_err(dev, "failed to get clock 'sclk_mixer'\n");
+ return -ENODEV;
+ }
+ mixer_res->mout_mixer = devm_clk_get(dev, "mout_mixer");
+ if (IS_ERR(mixer_res->mout_mixer)) {
+ dev_err(dev, "failed to get clock 'mout_mixer'\n");
+ return -ENODEV;
+ }
+
+ if (mixer_res->sclk_hdmi && mixer_res->mout_mixer)
+ clk_set_parent(mixer_res->mout_mixer,
+ mixer_res->sclk_hdmi);
+ }
res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1);
if (res == NULL) {
@@ -1082,7 +1093,8 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
clk_prepare_enable(res->mixer);
if (ctx->vp_enabled) {
clk_prepare_enable(res->vp);
- clk_prepare_enable(res->sclk_mixer);
+ if (ctx->has_sclk)
+ clk_prepare_enable(res->sclk_mixer);
}
mutex_lock(&ctx->mixer_mutex);
@@ -1121,7 +1133,8 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
clk_disable_unprepare(res->mixer);
if (ctx->vp_enabled) {
clk_disable_unprepare(res->vp);
- clk_disable_unprepare(res->sclk_mixer);
+ if (ctx->has_sclk)
+ clk_disable_unprepare(res->sclk_mixer);
}
pm_runtime_put_sync(ctx->dev);
@@ -1189,9 +1202,15 @@ static struct mixer_drv_data exynos5250_mxr_drv_data = {
.is_vp_enabled = 0,
};
+static struct mixer_drv_data exynos4212_mxr_drv_data = {
+ .version = MXR_VER_0_0_0_16,
+ .is_vp_enabled = 1,
+};
+
static struct mixer_drv_data exynos4210_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
+ .has_sclk = 1,
};
static struct platform_device_id mixer_driver_types[] = {
@@ -1208,6 +1227,12 @@ static struct platform_device_id mixer_driver_types[] = {
static struct of_device_id mixer_match_types[] = {
{
+ .compatible = "samsung,exynos4210-mixer",
+ .data = &exynos4210_mxr_drv_data,
+ }, {
+ .compatible = "samsung,exynos4212-mixer",
+ .data = &exynos4212_mxr_drv_data,
+ }, {
.compatible = "samsung,exynos5-mixer",
.data = &exynos5250_mxr_drv_data,
}, {
@@ -1220,6 +1245,7 @@ static struct of_device_id mixer_match_types[] = {
/* end node */
}
};
+MODULE_DEVICE_TABLE(of, mixer_match_types);
static int mixer_bind(struct device *dev, struct device *manager, void *data)
{
@@ -1251,6 +1277,7 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
ctx->pdev = pdev;
ctx->dev = dev;
ctx->vp_enabled = drv->is_vp_enabled;
+ ctx->has_sclk = drv->has_sclk;
ctx->mxr_ver = drv->version;
init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
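
Both the mixer and hdmi hunks key hardware differences (the new has_sclk flag, the 4210 PHY configuration) off per-compatible driver data, so one driver can serve several SoC revisions. A minimal sketch of fetching such data at probe time, with hypothetical names:

    #include <linux/errno.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    struct example_drv_data {
    	bool has_sclk;	/* hypothetical per-variant quirk flag */
    };

    static const struct example_drv_data example_v1_data = { .has_sclk = true };

    static const struct of_device_id example_match[] = {
    	{ .compatible = "vendor,example-v1", .data = &example_v1_data },
    	{ /* sentinel */ },
    };

    static int example_probe(struct platform_device *pdev)
    {
    	const struct of_device_id *match;
    	const struct example_drv_data *drv;

    	match = of_match_node(example_match, pdev->dev.of_node);
    	if (!match)
    		return -ENODEV;
    	drv = match->data;

    	dev_info(&pdev->dev, "has_sclk=%d\n", drv->has_sclk);
    	return 0;
    }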
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index ac357b02bd35..d4762799351d 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -15,8 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-
-
+#include <linux/component.h>
#include <linux/hdmi.h>
#include <linux/module.h>
#include <linux/irq.h>
@@ -730,12 +729,9 @@ tda998x_configure_audio(struct tda998x_priv *priv,
/* DRM encoder functions */
-static void
-tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
+static void tda998x_encoder_set_config(struct tda998x_priv *priv,
+ const struct tda998x_encoder_params *p)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
- struct tda998x_encoder_params *p = params;
-
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
(p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
VIP_CNTRL_0_SWAP_B(p->swap_b) |
@@ -752,11 +748,8 @@ tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
priv->params = *p;
}
-static void
-tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tda998x_encoder_dpms(struct tda998x_priv *priv, int mode)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
-
/* we only care about on or off: */
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
@@ -806,9 +799,8 @@ tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
return true;
}
-static int
-tda998x_encoder_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+static int tda998x_encoder_mode_valid(struct tda998x_priv *priv,
+ struct drm_display_mode *mode)
{
if (mode->clock > 150000)
return MODE_CLOCK_HIGH;
@@ -820,11 +812,10 @@ tda998x_encoder_mode_valid(struct drm_encoder *encoder,
}
static void
-tda998x_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+tda998x_encoder_mode_set(struct tda998x_priv *priv,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
uint16_t ref_pix, ref_line, n_pix, n_line;
uint16_t hs_pix_s, hs_pix_e;
uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
@@ -1012,20 +1003,16 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
}
static enum drm_connector_status
-tda998x_encoder_detect(struct drm_encoder *encoder,
- struct drm_connector *connector)
+tda998x_encoder_detect(struct tda998x_priv *priv)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
uint8_t val = cec_read(priv, REG_CEC_RXSHPDLEV);
return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
connector_status_disconnected;
}
-static int
-read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
+static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
uint8_t offset, segptr;
int ret, i;
@@ -1079,10 +1066,8 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
return 0;
}
-static uint8_t *
-do_get_edid(struct drm_encoder *encoder)
+static uint8_t *do_get_edid(struct tda998x_priv *priv)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
int j, valid_extensions = 0;
uint8_t *block, *new;
bool print_bad_edid = drm_debug & DRM_UT_KMS;
@@ -1094,7 +1079,7 @@ do_get_edid(struct drm_encoder *encoder)
reg_clear(priv, REG_TX4, TX4_PD_RAM);
/* base block fetch */
- if (read_edid_block(encoder, block, 0))
+ if (read_edid_block(priv, block, 0))
goto fail;
if (!drm_edid_block_valid(block, 0, print_bad_edid))
@@ -1111,7 +1096,7 @@ do_get_edid(struct drm_encoder *encoder)
for (j = 1; j <= block[0x7e]; j++) {
uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH;
- if (read_edid_block(encoder, ext_block, j))
+ if (read_edid_block(priv, ext_block, j))
goto fail;
if (!drm_edid_block_valid(ext_block, j, print_bad_edid))
@@ -1144,11 +1129,10 @@ fail:
}
static int
-tda998x_encoder_get_modes(struct drm_encoder *encoder,
- struct drm_connector *connector)
+tda998x_encoder_get_modes(struct tda998x_priv *priv,
+ struct drm_connector *connector)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
- struct edid *edid = (struct edid *)do_get_edid(encoder);
+ struct edid *edid = (struct edid *)do_get_edid(priv);
int n = 0;
if (edid) {
@@ -1161,18 +1145,14 @@ tda998x_encoder_get_modes(struct drm_encoder *encoder,
return n;
}
-static int
-tda998x_encoder_create_resources(struct drm_encoder *encoder,
- struct drm_connector *connector)
+static void tda998x_encoder_set_polling(struct tda998x_priv *priv,
+ struct drm_connector *connector)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
-
if (priv->hdmi->irq)
connector->polled = DRM_CONNECTOR_POLL_HPD;
else
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
- return 0;
}
static int
@@ -1185,66 +1165,97 @@ tda998x_encoder_set_property(struct drm_encoder *encoder,
return 0;
}
-static void
-tda998x_encoder_destroy(struct drm_encoder *encoder)
+static void tda998x_destroy(struct tda998x_priv *priv)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
-
/* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
if (priv->hdmi->irq)
free_irq(priv->hdmi->irq, priv);
- if (priv->cec)
- i2c_unregister_device(priv->cec);
+ i2c_unregister_device(priv->cec);
+}
+
+/* Slave encoder support */
+
+static void
+tda998x_encoder_slave_set_config(struct drm_encoder *encoder, void *params)
+{
+ tda998x_encoder_set_config(to_tda998x_priv(encoder), params);
+}
+
+static void tda998x_encoder_slave_destroy(struct drm_encoder *encoder)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+ tda998x_destroy(priv);
drm_i2c_encoder_destroy(encoder);
kfree(priv);
}
-static struct drm_encoder_slave_funcs tda998x_encoder_funcs = {
- .set_config = tda998x_encoder_set_config,
- .destroy = tda998x_encoder_destroy,
- .dpms = tda998x_encoder_dpms,
- .save = tda998x_encoder_save,
- .restore = tda998x_encoder_restore,
- .mode_fixup = tda998x_encoder_mode_fixup,
- .mode_valid = tda998x_encoder_mode_valid,
- .mode_set = tda998x_encoder_mode_set,
- .detect = tda998x_encoder_detect,
- .get_modes = tda998x_encoder_get_modes,
- .create_resources = tda998x_encoder_create_resources,
- .set_property = tda998x_encoder_set_property,
-};
+static void tda998x_encoder_slave_dpms(struct drm_encoder *encoder, int mode)
+{
+ tda998x_encoder_dpms(to_tda998x_priv(encoder), mode);
+}
-/* I2C driver functions */
+static int tda998x_encoder_slave_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ return tda998x_encoder_mode_valid(to_tda998x_priv(encoder), mode);
+}
-static int
-tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static void
+tda998x_encoder_slave_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- return 0;
+ tda998x_encoder_mode_set(to_tda998x_priv(encoder), mode, adjusted_mode);
+}
+
+static enum drm_connector_status
+tda998x_encoder_slave_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return tda998x_encoder_detect(to_tda998x_priv(encoder));
+}
+
+static int tda998x_encoder_slave_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return tda998x_encoder_get_modes(to_tda998x_priv(encoder), connector);
}
static int
-tda998x_remove(struct i2c_client *client)
+tda998x_encoder_slave_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
+ tda998x_encoder_set_polling(to_tda998x_priv(encoder), connector);
return 0;
}
-static int
-tda998x_encoder_init(struct i2c_client *client,
- struct drm_device *dev,
- struct drm_encoder_slave *encoder_slave)
+static struct drm_encoder_slave_funcs tda998x_encoder_slave_funcs = {
+ .set_config = tda998x_encoder_slave_set_config,
+ .destroy = tda998x_encoder_slave_destroy,
+ .dpms = tda998x_encoder_slave_dpms,
+ .save = tda998x_encoder_save,
+ .restore = tda998x_encoder_restore,
+ .mode_fixup = tda998x_encoder_mode_fixup,
+ .mode_valid = tda998x_encoder_slave_mode_valid,
+ .mode_set = tda998x_encoder_slave_mode_set,
+ .detect = tda998x_encoder_slave_detect,
+ .get_modes = tda998x_encoder_slave_get_modes,
+ .create_resources = tda998x_encoder_slave_create_resources,
+ .set_property = tda998x_encoder_set_property,
+};
+
+/* I2C driver functions */
+
+static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
{
- struct tda998x_priv *priv;
struct device_node *np = client->dev.of_node;
u32 video;
int rev_lo, rev_hi, ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
@@ -1252,17 +1263,11 @@ tda998x_encoder_init(struct i2c_client *client,
priv->current_page = 0xff;
priv->hdmi = client;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
- if (!priv->cec) {
- kfree(priv);
+ if (!priv->cec)
return -ENODEV;
- }
- priv->encoder = &encoder_slave->base;
priv->dpms = DRM_MODE_DPMS_OFF;
- encoder_slave->slave_priv = priv;
- encoder_slave->slave_funcs = &tda998x_encoder_funcs;
-
/* wake up the device: */
cec_write(priv, REG_CEC_ENAMODS,
CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1365,12 +1370,231 @@ fail:
*/
if (priv->cec)
i2c_unregister_device(priv->cec);
- kfree(priv);
- encoder_slave->slave_priv = NULL;
- encoder_slave->slave_funcs = NULL;
return -ENXIO;
}
+static int tda998x_encoder_init(struct i2c_client *client,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder_slave)
+{
+ struct tda998x_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->encoder = &encoder_slave->base;
+
+ ret = tda998x_create(client, priv);
+ if (ret) {
+ kfree(priv);
+ return ret;
+ }
+
+ encoder_slave->slave_priv = priv;
+ encoder_slave->slave_funcs = &tda998x_encoder_slave_funcs;
+
+ return 0;
+}
+
+struct tda998x_priv2 {
+ struct tda998x_priv base;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+#define conn_to_tda998x_priv2(x) \
+ container_of(x, struct tda998x_priv2, connector)
+
+#define enc_to_tda998x_priv2(x) \
+ container_of(x, struct tda998x_priv2, encoder)
+
+static void tda998x_encoder2_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
+
+ tda998x_encoder_dpms(&priv->base, mode);
+}
+
+static void tda998x_encoder_prepare(struct drm_encoder *encoder)
+{
+ tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void tda998x_encoder_commit(struct drm_encoder *encoder)
+{
+ tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void tda998x_encoder2_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
+
+ tda998x_encoder_mode_set(&priv->base, mode, adjusted_mode);
+}
+
+static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
+ .dpms = tda998x_encoder2_dpms,
+ .save = tda998x_encoder_save,
+ .restore = tda998x_encoder_restore,
+ .mode_fixup = tda998x_encoder_mode_fixup,
+ .prepare = tda998x_encoder_prepare,
+ .commit = tda998x_encoder_commit,
+ .mode_set = tda998x_encoder2_mode_set,
+};
+
+static void tda998x_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
+
+ tda998x_destroy(&priv->base);
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs tda998x_encoder_funcs = {
+ .destroy = tda998x_encoder_destroy,
+};
+
+static int tda998x_connector_get_modes(struct drm_connector *connector)
+{
+ struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
+
+ return tda998x_encoder_get_modes(&priv->base, connector);
+}
+
+static int tda998x_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
+
+ return tda998x_encoder_mode_valid(&priv->base, mode);
+}
+
+static struct drm_encoder *
+tda998x_connector_best_encoder(struct drm_connector *connector)
+{
+ struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
+
+ return &priv->encoder;
+}
+
+static
+const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
+ .get_modes = tda998x_connector_get_modes,
+ .mode_valid = tda998x_connector_mode_valid,
+ .best_encoder = tda998x_connector_best_encoder,
+};
+
+static enum drm_connector_status
+tda998x_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
+
+ return tda998x_encoder_detect(&priv->base);
+}
+
+static void tda998x_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs tda998x_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = tda998x_connector_detect,
+ .destroy = tda998x_connector_destroy,
+};
+
+static int tda998x_bind(struct device *dev, struct device *master, void *data)
+{
+ struct tda998x_encoder_params *params = dev->platform_data;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct drm_device *drm = data;
+ struct tda998x_priv2 *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ priv->base.encoder = &priv->encoder;
+ priv->connector.interlace_allowed = 1;
+ priv->encoder.possible_crtcs = 1 << 0;
+
+ ret = tda998x_create(client, &priv->base);
+ if (ret)
+ return ret;
+
+ if (!dev->of_node && params)
+ tda998x_encoder_set_config(&priv->base, params);
+
+ tda998x_encoder_set_polling(&priv->base, &priv->connector);
+
+ drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
+ ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret)
+ goto err_encoder;
+
+ drm_connector_helper_add(&priv->connector,
+ &tda998x_connector_helper_funcs);
+ ret = drm_connector_init(drm, &priv->connector,
+ &tda998x_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret)
+ goto err_connector;
+
+ ret = drm_connector_register(&priv->connector);
+ if (ret)
+ goto err_sysfs;
+
+ priv->connector.encoder = &priv->encoder;
+ drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
+
+ return 0;
+
+err_sysfs:
+ drm_connector_cleanup(&priv->connector);
+err_connector:
+ drm_encoder_cleanup(&priv->encoder);
+err_encoder:
+ tda998x_destroy(&priv->base);
+ return ret;
+}
+
+static void tda998x_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct tda998x_priv2 *priv = dev_get_drvdata(dev);
+
+ drm_connector_cleanup(&priv->connector);
+ drm_encoder_cleanup(&priv->encoder);
+ tda998x_destroy(&priv->base);
+}
+
+static const struct component_ops tda998x_ops = {
+ .bind = tda998x_bind,
+ .unbind = tda998x_unbind,
+};
+
+static int
+tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ return component_add(&client->dev, &tda998x_ops);
+}
+
+static int tda998x_remove(struct i2c_client *client)
+{
+ component_del(&client->dev, &tda998x_ops);
+ return 0;
+}
+
#ifdef CONFIG_OF
static const struct of_device_id tda998x_dt_ids[] = {
{ .compatible = "nxp,tda998x", },
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 437e1824d0bf..4e39ab34eb1c 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -69,15 +69,3 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
option changes the default for that module option.
If in doubt, say "N".
-
-config DRM_I915_UMS
- bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
- depends on DRM_I915 && BROKEN
- default n
- help
- Choose this option if you still need userspace modesetting.
-
- Userspace modesetting is deprecated for quite some time now, so
- enable this only if you have ancient versions of the DDX drivers.
-
- If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index cad1683d8bb5..91bd167e1cb7 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -59,6 +59,7 @@ i915-y += dvo_ch7017.o \
intel_crt.o \
intel_ddi.o \
intel_dp.o \
+ intel_dp_mst.o \
intel_dsi_cmd.o \
intel_dsi.o \
intel_dsi_pll.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 9d7954366bd2..dea99d92fb4a 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -426,6 +426,9 @@ static const u32 gen7_render_regs[] = {
GEN7_SO_WRITE_OFFSET(1),
GEN7_SO_WRITE_OFFSET(2),
GEN7_SO_WRITE_OFFSET(3),
+ GEN7_L3SQCREG1,
+ GEN7_L3CNTLREG2,
+ GEN7_L3CNTLREG3,
};
static const u32 gen7_blt_regs[] = {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b8c689202c40..9e737b771c40 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -170,11 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
if (obj->ring != NULL)
seq_printf(m, " (%s)", obj->ring->name);
+ if (obj->frontbuffer_bits)
+ seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
- seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+ seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, ' ');
}
@@ -515,6 +517,11 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
unsigned long flags;
struct intel_crtc *crtc;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
for_each_intel_crtc(dev, crtc) {
const char pipe = pipe_name(crtc->pipe);
@@ -556,6 +563,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -985,29 +994,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
i915_next_seqno_get, i915_next_seqno_set,
"0x%llx\n");
-static int i915_rstdby_delays(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 crstanddelay;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- crstanddelay = I915_READ16(CRSTANDVID);
-
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
-
- return 0;
-}
-
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1029,7 +1015,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
MEMSTAT_VID_SHIFT);
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
- } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+ } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
+ IS_BROADWELL(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1048,7 +1035,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
reqf = I915_READ(GEN6_RPNSWREQ);
reqf &= ~GEN6_TURBO_DISABLE;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
reqf >>= 24;
else
reqf >>= 25;
@@ -1065,7 +1052,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1121,20 +1108,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Max overclocked frequency: %dMHz\n",
dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
} else if (IS_VALLEYVIEW(dev)) {
- u32 freq_sts, val;
+ u32 freq_sts;
mutex_lock(&dev_priv->rps.hw_lock);
freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
- val = valleyview_rps_max_freq(dev_priv);
seq_printf(m, "max GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, val));
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
- val = valleyview_rps_min_freq(dev_priv);
seq_printf(m, "min GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, val));
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+
+ seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
seq_printf(m, "current GPU freq: %d MHz\n",
vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
@@ -1148,61 +1136,6 @@ out:
return ret;
}
-static int i915_delayfreq_table(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 delayfreq;
- int ret, i;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- for (i = 0; i < 16; i++) {
- delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
- seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
- (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
- }
-
- intel_runtime_pm_put(dev_priv);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static inline int MAP_TO_MV(int map)
-{
- return 1250 - (map * 25);
-}
-
-static int i915_inttoext_table(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 inttoext;
- int ret, i;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- for (i = 1; i <= 32; i++) {
- inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
- seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
- }
-
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = m->private;
@@ -1513,10 +1446,17 @@ static int i915_ips_status(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
- seq_puts(m, "enabled\n");
- else
- seq_puts(m, "disabled\n");
+ seq_printf(m, "Enabled by kernel parameter: %s\n",
+ yesno(i915.enable_ips));
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ seq_puts(m, "Currently: unknown\n");
+ } else {
+ if (I915_READ(IPS_CTL) & IPS_ENABLE)
+ seq_puts(m, "Currently: enabled\n");
+ else
+ seq_puts(m, "Currently: disabled\n");
+ }
intel_runtime_pm_put(dev_priv);
@@ -1620,26 +1560,6 @@ out:
return ret;
}
-static int i915_gfxec(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
- intel_runtime_pm_put(dev_priv);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static int i915_opregion(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1677,9 +1597,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
#ifdef CONFIG_DRM_I915_FBDEV
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (ret)
- return ret;
ifbdev = dev_priv->fbdev;
fb = to_intel_framebuffer(ifbdev->helper.fb);
@@ -1692,7 +1609,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
- mutex_unlock(&dev->mode_config.mutex);
#endif
mutex_lock(&dev->mode_config.fb_lock);
@@ -1723,7 +1639,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct intel_context *ctx;
int ret, i;
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
@@ -1740,7 +1656,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
}
list_for_each_entry(ctx, &dev_priv->context_list, link) {
- if (ctx->obj == NULL)
+ if (ctx->legacy_hw_ctx.rcs_state == NULL)
continue;
seq_puts(m, "HW context ");
@@ -1749,11 +1665,11 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ring->default_context == ctx)
seq_printf(m, "(default context %s) ", ring->name);
- describe_obj(m, ctx->obj);
+ describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
seq_putc(m, '\n');
}
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1863,7 +1779,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
if (i915_gem_context_is_default(ctx))
seq_puts(m, " default context:\n");
else
- seq_printf(m, " context %d:\n", ctx->id);
+ seq_printf(m, " context %d:\n", ctx->user_handle);
ppgtt->debug_dump(ppgtt, m);
return 0;
@@ -1976,17 +1892,25 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
+ mutex_lock(&dev_priv->psr.lock);
seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
+ seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
+ seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
+ seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
+ dev_priv->psr.busy_frontbuffer_bits);
+ seq_printf(m, "Re-enable work scheduled: %s\n",
+ yesno(work_busy(&dev_priv->psr.work.work)));
enabled = HAS_PSR(dev) &&
I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
- seq_printf(m, "Enabled: %s\n", yesno(enabled));
+ seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
if (HAS_PSR(dev))
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance_Counter: %u\n", psrperf);
+ mutex_unlock(&dev_priv->psr.lock);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -2072,7 +1996,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "IRQs disabled: %s\n",
- yesno(dev_priv->pm.irqs_disabled));
+ yesno(!intel_irqs_enabled(dev_priv)));
return 0;
}
@@ -2126,6 +2050,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "VGA";
case POWER_DOMAIN_AUDIO:
return "AUDIO";
+ case POWER_DOMAIN_PLLS:
+ return "PLLS";
case POWER_DOMAIN_INIT:
return "INIT";
default:
@@ -2223,9 +2149,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder;
- seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
- crtc->primary->fb->base.id, crtc->x, crtc->y,
- crtc->primary->fb->width, crtc->primary->fb->height);
+ if (crtc->primary->fb)
+ seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
+ crtc->primary->fb->base.id, crtc->x, crtc->y,
+ crtc->primary->fb->width, crtc->primary->fb->height);
+ else
+ seq_puts(m, "\tprimary plane disabled\n");
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
intel_encoder_info(m, intel_crtc, intel_encoder);
}
@@ -2287,13 +2216,15 @@ static void intel_connector_info(struct seq_file *m,
seq_printf(m, "\tCEA rev: %d\n",
connector->display_info.cea_rev);
}
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_encoder->type == INTEL_OUTPUT_EDP)
- intel_dp_info(m, intel_connector);
- else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
- intel_hdmi_info(m, intel_connector);
- else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
- intel_lvds_info(m, intel_connector);
+ if (intel_encoder) {
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
+ intel_dp_info(m, intel_connector);
+ else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
+ intel_hdmi_info(m, intel_connector);
+ else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+ intel_lvds_info(m, intel_connector);
+ }
seq_printf(m, "\tmodes:\n");
list_for_each_entry(mode, &connector->modes, head)
@@ -2347,17 +2278,17 @@ static int i915_display_info(struct seq_file *m, void *unused)
bool active;
int x, y;
- seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
+ seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
crtc->base.base.id, pipe_name(crtc->pipe),
- yesno(crtc->active));
+ yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
if (crtc->active) {
intel_crtc_info(m, crtc);
active = cursor_position(dev, crtc->pipe, &x, &y);
- seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
+ seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
yesno(crtc->cursor_base),
- x, y, crtc->cursor_addr,
- yesno(active));
+ x, y, crtc->cursor_width, crtc->cursor_height,
+ crtc->cursor_addr, yesno(active));
}
seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
@@ -2377,12 +2308,132 @@ static int i915_display_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_semaphore_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ int i, j, ret;
+
+ if (!i915_semaphore_is_enabled(dev)) {
+ seq_puts(m, "Semaphores are disabled\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ if (IS_BROADWELL(dev)) {
+ struct page *page;
+ uint64_t *seqno;
+
+ page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+
+ seqno = (uint64_t *)kmap_atomic(page);
+ for_each_ring(ring, dev_priv, i) {
+ uint64_t offset;
+
+ seq_printf(m, "%s\n", ring->name);
+
+ seq_puts(m, " Last signal:");
+ for (j = 0; j < num_rings; j++) {
+ offset = i * I915_NUM_RINGS + j;
+ seq_printf(m, "0x%08llx (0x%02llx) ",
+ seqno[offset], offset * 8);
+ }
+ seq_putc(m, '\n');
+
+ seq_puts(m, " Last wait: ");
+ for (j = 0; j < num_rings; j++) {
+ offset = i + (j * I915_NUM_RINGS);
+ seq_printf(m, "0x%08llx (0x%02llx) ",
+ seqno[offset], offset * 8);
+ }
+ seq_putc(m, '\n');
+
+ }
+ kunmap_atomic(seqno);
+ } else {
+ seq_puts(m, " Last signal:");
+ for_each_ring(ring, dev_priv, i)
+ for (j = 0; j < num_rings; j++)
+ seq_printf(m, "0x%08x\n",
+ I915_READ(ring->semaphore.mbox.signal[j]));
+ seq_putc(m, '\n');
+ }
+
+ seq_puts(m, "\nSync seqno:\n");
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < num_rings; j++) {
+ seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
+ }
+ seq_putc(m, '\n');
+ }
+ seq_putc(m, '\n');
+
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ drm_modeset_lock_all(dev);
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
+ seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
+ pll->active, yesno(pll->on));
+ seq_printf(m, " tracked hardware state:\n");
+ seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll);
+ seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll);
+ }
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
enum pipe pipe;
};
+static int i915_dp_mst_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_digital_port *intel_dig_port;
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = to_intel_encoder(encoder);
+ if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
+ continue;
+ intel_dig_port = enc_to_dig_port(encoder);
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+
+ drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+ }
+ drm_modeset_unlock_all(dev);
+ return 0;
+}
+
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
struct pipe_crc_info *info = inode->i_private;
@@ -2849,7 +2900,60 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+ drm_modeset_lock_all(dev);
+ /*
+ * If we use the eDP transcoder we need to make sure that we don't
+ * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+ * relevant on hsw with pipe A when using the always-on power well
+ * routing.
+ */
+ if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
+ !crtc->config.pch_pfit.enabled) {
+ crtc->config.pch_pfit.force_thru = true;
+
+ intel_display_power_get(dev_priv,
+ POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+
+ dev_priv->display.crtc_disable(&crtc->base);
+ dev_priv->display.crtc_enable(&crtc->base);
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+ drm_modeset_lock_all(dev);
+ /*
+ * If we use the eDP transcoder we need to make sure that we don't
+ * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+ * relevant on hsw with pipe A when using the always-on power well
+ * routing.
+ */
+ if (crtc->config.pch_pfit.force_thru) {
+ crtc->config.pch_pfit.force_thru = false;
+
+ dev_priv->display.crtc_disable(&crtc->base);
+ dev_priv->display.crtc_enable(&crtc->base);
+
+ intel_display_power_put(dev_priv,
+ POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
uint32_t *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
@@ -2863,6 +2967,9 @@ static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_PF:
+ if (IS_HASWELL(dev) && pipe == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev);
+
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_NONE:
@@ -2895,11 +3002,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
else if (INTEL_INFO(dev)->gen < 5)
ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
else if (IS_VALLEYVIEW(dev))
- ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val);
+ ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
else if (IS_GEN5(dev) || IS_GEN6(dev))
ret = ilk_pipe_crc_ctl_reg(&source, &val);
else
- ret = ivb_pipe_crc_ctl_reg(&source, &val);
+ ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
if (ret != 0)
return ret;
@@ -2929,11 +3036,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
/* real source -> none transition */
if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
struct intel_pipe_crc_entry *entries;
+ struct intel_crtc *crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
pipe_name(pipe));
- intel_wait_for_vblank(dev, pipe);
+ drm_modeset_lock(&crtc->base.mutex, NULL);
+ if (crtc->active)
+ intel_wait_for_vblank(dev, pipe);
+ drm_modeset_unlock(&crtc->base.mutex);
spin_lock_irq(&pipe_crc->lock);
entries = pipe_crc->entries;
@@ -2946,6 +3058,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
g4x_undo_pipe_scramble_reset(dev, pipe);
else if (IS_VALLEYVIEW(dev))
vlv_undo_pipe_scramble_reset(dev, pipe);
+ else if (IS_HASWELL(dev) && pipe == PIPE_A)
+ hsw_undo_trans_edp_pipe_A_crc_wa(dev);
}
return 0;
@@ -3177,7 +3291,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return -ENODEV;
return single_open(file, pri_wm_latency_show, dev);
@@ -3187,7 +3301,7 @@ static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return -ENODEV;
return single_open(file, spr_wm_latency_show, dev);
@@ -3197,7 +3311,7 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return -ENODEV;
return single_open(file, cur_wm_latency_show, dev);
@@ -3506,7 +3620,7 @@ i915_max_freq_get(void *data, u64 *val)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3532,7 +3646,7 @@ i915_max_freq_set(void *data, u64 val)
u32 rp_state_cap, hw_max, hw_min;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3549,8 +3663,8 @@ i915_max_freq_set(void *data, u64 val)
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv, val);
- hw_max = valleyview_rps_max_freq(dev_priv);
- hw_min = valleyview_rps_min_freq(dev_priv);
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
@@ -3587,7 +3701,7 @@ i915_min_freq_get(void *data, u64 *val)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3613,7 +3727,7 @@ i915_min_freq_set(void *data, u64 val)
u32 rp_state_cap, hw_max, hw_min;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3630,8 +3744,8 @@ i915_min_freq_set(void *data, u64 val)
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv, val);
- hw_max = valleyview_rps_max_freq(dev_priv);
- hw_min = valleyview_rps_min_freq(dev_priv);
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
@@ -3799,14 +3913,10 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
- {"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_delayfreq_table", i915_delayfreq_table, 0},
- {"i915_inttoext_table", i915_inttoext_table, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
- {"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_ips_status", i915_ips_status, 0},
{"i915_sr_status", i915_sr_status, 0},
@@ -3823,6 +3933,9 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_pc8_status", i915_pc8_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
+ {"i915_semaphore_status", i915_semaphore_status, 0},
+ {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
+ {"i915_dp_mst_info", i915_dp_mst_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
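
Most of the debugfs churn above converges on one shape: an entry is a seq_file show function that takes struct_mutex interruptibly, holds a runtime-PM reference while it touches hardware, and releases both in reverse order (compare the pageflip and semaphore entries). A minimal sketch of that shape, reusing the helpers visible in this diff:

    static int example_info(struct seq_file *m, void *unused)
    {
    	struct drm_info_node *node = m->private;
    	struct drm_device *dev = node->minor->dev;
    	struct drm_i915_private *dev_priv = dev->dev_private;
    	int ret;

    	ret = mutex_lock_interruptible(&dev->struct_mutex);
    	if (ret)
    		return ret;		/* interrupted, or deadlock avoided */
    	intel_runtime_pm_get(dev_priv);	/* keep the device awake */

    	seq_printf(m, "example: %u\n", 0u);	/* register reads go here */

    	intel_runtime_pm_put(dev_priv);
    	mutex_unlock(&dev->struct_mutex);
    	return 0;
    }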
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index cac9265f9757..2e7f03ad5ee2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -138,7 +138,7 @@ static void i915_free_hws(struct drm_device *dev)
I915_WRITE(HWS_PGA, 0x1ffff000);
}
-void i915_kernel_lost_context(struct drm_device * dev)
+void i915_kernel_lost_context(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
@@ -166,7 +166,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
-static int i915_dma_cleanup(struct drm_device * dev)
+static int i915_dma_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
@@ -190,7 +190,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
return 0;
}
-static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -235,7 +235,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
return 0;
}
-static int i915_dma_resume(struct drm_device * dev)
+static int i915_dma_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = LP_RING(dev_priv);
@@ -359,7 +359,7 @@ static int validate_cmd(int cmd)
return 0;
}
-static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
+static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
@@ -369,6 +369,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
for (i = 0; i < dwords;) {
int sz = validate_cmd(buffer[i]);
+
if (sz == 0 || i + sz > dwords)
return -EINVAL;
i += sz;
@@ -453,7 +454,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
}
}
-static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+static int i915_dispatch_cmdbuffer(struct drm_device *dev,
drm_i915_cmdbuffer_t *cmd,
struct drm_clip_rect *cliprects,
void *cmdbuf)
@@ -487,8 +488,8 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
return 0;
}
-static int i915_dispatch_batchbuffer(struct drm_device * dev,
- drm_i915_batchbuffer_t * batch,
+static int i915_dispatch_batchbuffer(struct drm_device *dev,
+ drm_i915_batchbuffer_t *batch,
struct drm_clip_rect *cliprects)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -549,7 +550,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
return 0;
}
-static int i915_dispatch_flip(struct drm_device * dev)
+static int i915_dispatch_flip(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
@@ -755,7 +756,7 @@ fail_batch_free:
return ret;
}
-static int i915_emit_irq(struct drm_device * dev)
+static int i915_emit_irq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -781,7 +782,7 @@ static int i915_emit_irq(struct drm_device * dev)
return dev_priv->dri1.counter;
}
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+static int i915_wait_irq(struct drm_device *dev, int irq_nr)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -1266,6 +1267,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
{
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
if (state == VGA_SWITCHEROO_ON) {
pr_info("switched on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -1338,6 +1340,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_gem_stolen;
+ dev_priv->pm._irqs_disabled = false;
+
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
@@ -1422,15 +1426,16 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
}
#if IS_ENABLED(CONFIG_FB)
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
struct pci_dev *pdev = dev_priv->dev->pdev;
bool primary;
+ int ret;
ap = alloc_apertures(1);
if (!ap)
- return;
+ return -ENOMEM;
ap->ranges[0].base = dev_priv->gtt.mappable_base;
ap->ranges[0].size = dev_priv->gtt.mappable_end;
@@ -1438,13 +1443,16 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
- remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+ ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
kfree(ap);
+
+ return ret;
}
#else
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
+ return 0;
}
#endif
@@ -1461,12 +1469,13 @@ static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
- int ret;
+ int ret = 0;
DRM_INFO("Replacing VGA console driver\n");
console_lock();
- ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+ if (con_is_bound(&vga_con))
+ ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
if (ret == 0) {
ret = do_unregister_con_driver(&vga_con);
@@ -1488,10 +1497,11 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
- DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
info->gen,
dev_priv->dev->pdev->device,
+ dev_priv->dev->pdev->revision,
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
@@ -1590,7 +1600,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (dev_priv == NULL)
return -ENOMEM;
- dev->dev_private = (void *)dev_priv;
+ dev->dev_private = dev_priv;
dev_priv->dev = dev;
/* copy initial configuration to dev_priv->info */
@@ -1602,6 +1612,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
+ spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->modeset_restore_lock);
@@ -1660,7 +1671,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gtt;
}
- i915_kick_out_firmware_fb(dev_priv);
+ ret = i915_kick_out_firmware_fb(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+ goto out_gtt;
+ }
}
pci_set_master(dev->pdev);
@@ -1713,6 +1728,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
+ dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->dp_wq == NULL) {
+ DRM_ERROR("Failed to create our dp workqueue.\n");
+ ret = -ENOMEM;
+ goto out_freewq;
+ }
+
intel_irq_init(dev);
intel_uncore_sanitize(dev);
@@ -1788,6 +1810,8 @@ out_gem_unload:
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos);
+ destroy_workqueue(dev_priv->dp_wq);
+out_freewq:
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1888,6 +1912,7 @@ int i915_driver_unload(struct drm_device *dev)
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
+ destroy_workqueue(dev_priv->dp_wq);
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
@@ -1929,7 +1954,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 * and DMA structures, since the kernel won't be using them, and clean
* up any GEM state.
*/
-void i915_driver_lastclose(struct drm_device * dev)
+void i915_driver_lastclose(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1950,11 +1975,11 @@ void i915_driver_lastclose(struct drm_device * dev)
i915_dma_cleanup(dev);
}
-void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
mutex_lock(&dev->struct_mutex);
- i915_gem_context_close(dev, file_priv);
- i915_gem_release(dev, file_priv);
+ i915_gem_context_close(dev, file);
+ i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
}
@@ -2027,7 +2052,7 @@ int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
 * manage the gtt, we need to claim that all intel devices are agp.
 * Otherwise the drm core refuses to initialize the agp support code.
*/
-int i915_driver_device_is_agp(struct drm_device * dev)
+int i915_driver_device_is_agp(struct drm_device *dev)
{
return 1;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d935ab3718e1..6c4b25ce8bb0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -28,6 +28,7 @@
*/
#include <linux/device.h>
+#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -46,8 +47,6 @@ static struct drm_driver driver;
PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
- .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
- .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
#define GEN_CHV_PIPEOFFSETS \
@@ -55,10 +54,6 @@ static struct drm_driver driver;
CHV_PIPE_C_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
CHV_TRANSCODER_C_OFFSET, }, \
- .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
- CHV_DPLL_C_OFFSET }, \
- .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
- CHV_DPLL_C_MD_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
CHV_PALETTE_C_OFFSET }
@@ -308,6 +303,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -319,6 +315,7 @@ static const struct intel_device_info intel_broadwell_m_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -330,6 +327,7 @@ static const struct intel_device_info intel_broadwell_gt3d_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -341,6 +339,7 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -482,10 +481,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (i915.semaphores >= 0)
return i915.semaphores;
- /* Until we get further testing... */
- if (IS_GEN8(dev))
- return false;
-
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
@@ -499,8 +494,7 @@ static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
-
- intel_runtime_pm_get(dev_priv);
+ pci_power_t opregion_target_state;
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
@@ -526,20 +520,23 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
- drm_irq_uninstall(dev);
-
- intel_disable_gt_powersave(dev);
-
/*
* Disable CRTCs directly since we want to preserve sw state
- * for _thaw.
+ * for _thaw. Also, power gate the CRTC power wells.
*/
drm_modeset_lock_all(dev);
- for_each_crtc(dev, crtc) {
- dev_priv->display.crtc_disable(crtc);
- }
+ for_each_crtc(dev, crtc)
+ intel_crtc_control(crtc, false);
drm_modeset_unlock_all(dev);
+ intel_dp_mst_suspend(dev);
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ intel_runtime_pm_disable_interrupts(dev);
+
+ intel_suspend_gt_powersave(dev);
+
intel_modeset_suspend_hw(dev);
}
@@ -547,8 +544,15 @@ static int i915_drm_freeze(struct drm_device *dev)
i915_save_state(dev);
+ opregion_target_state = PCI_D3cold;
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+ if (acpi_target_system_state() < ACPI_STATE_S3)
+ opregion_target_state = PCI_D1;
+#endif
+ intel_opregion_notify_adapter(dev, opregion_target_state);
+
+ intel_uncore_forcewake_reset(dev, false);
intel_opregion_fini(dev);
- intel_uncore_fini(dev);
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
@@ -556,6 +560,8 @@ static int i915_drm_freeze(struct drm_device *dev)
dev_priv->suspend_count++;
+ intel_display_set_init_power(dev_priv, false);
+
return 0;
}
@@ -605,7 +611,10 @@ static int i915_drm_thaw_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_uncore_early_sanitize(dev);
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ hsw_disable_pc8(dev_priv);
+
+ intel_uncore_early_sanitize(dev, true);
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
@@ -638,11 +647,19 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
}
mutex_unlock(&dev->struct_mutex);
- /* We need working interrupts for modeset enabling ... */
- drm_irq_install(dev, dev->pdev->irq);
+ intel_runtime_pm_restore_interrupts(dev);
intel_modeset_init_hw(dev);
+ {
+ unsigned long irqflags;
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
+ intel_dp_mst_resume(dev);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
@@ -676,7 +693,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_opregion_notify_adapter(dev, PCI_D0);
+
return 0;
}
@@ -885,6 +903,7 @@ static int i915_pm_suspend_late(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
/*
 * We have a suspend ordering issue with the snd-hda driver also
@@ -898,6 +917,9 @@ static int i915_pm_suspend_late(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
+ hsw_enable_pc8(dev_priv);
+
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5484f052d50c..5de27f9b8c26 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,7 +53,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20080730"
+#define DRIVER_DATE "20140620"
enum pipe {
INVALID_PIPE = -1,
@@ -129,6 +129,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_OTHER,
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
+ POWER_DOMAIN_PLLS,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
@@ -178,14 +179,20 @@ enum hpd_pin {
list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
if ((intel_connector)->base.encoder == (__encoder))
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ if ((1 << (domain)) & (mask))
+
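/*
 * Illustrative sketch, not part of the patch: the new
 * for_each_power_domain() iterator visits every domain whose bit is set
 * in a mask. A caller releasing a set of domains could look like this
 * (the function name is hypothetical):
 */
static void example_release_domains(struct drm_i915_private *dev_priv,
				    unsigned long mask)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, mask)
		intel_display_power_put(dev_priv, domain);
}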
struct drm_i915_private;
struct i915_mmu_object;
enum intel_dpll_id {
DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
/* real shared dpll ids must be >= 0 */
- DPLL_ID_PCH_PLL_A,
- DPLL_ID_PCH_PLL_B,
+ DPLL_ID_PCH_PLL_A = 0,
+ DPLL_ID_PCH_PLL_B = 1,
+ DPLL_ID_WRPLL1 = 0,
+ DPLL_ID_WRPLL2 = 1,
};
#define I915_NUM_PLLS 2
@@ -194,6 +201,7 @@ struct intel_dpll_hw_state {
uint32_t dpll_md;
uint32_t fp0;
uint32_t fp1;
+ uint32_t wrpll;
};
struct intel_shared_dpll {
@@ -204,6 +212,8 @@ struct intel_shared_dpll {
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
struct intel_dpll_hw_state hw_state;
+ /* The mode_set hook is optional and should be used together with the
+ * intel_prepare_shared_dpll function. */
void (*mode_set)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
void (*enable)(struct drm_i915_private *dev_priv,
@@ -228,12 +238,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n);
-struct intel_ddi_plls {
- int spll_refcount;
- int wrpll1_refcount;
- int wrpll2_refcount;
-};
-
/* Interface history:
*
* 1.1: Original.
@@ -324,6 +328,7 @@ struct drm_i915_error_state {
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
+ struct drm_i915_error_object *semaphore_obj;
struct drm_i915_error_ring {
bool valid;
@@ -435,8 +440,8 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
- bool enable, bool scaled);
+ uint32_t sprite_width, uint32_t sprite_height,
+ int pixel_size, bool enable, bool scaled);
void (*modeset_global_resources)(struct drm_device *dev);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
@@ -552,8 +557,6 @@ struct intel_device_info {
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
- int dpll_offsets[I915_MAX_PIPES];
- int dpll_md_offsets[I915_MAX_PIPES];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
};
@@ -586,28 +589,48 @@ struct i915_ctx_hang_stats {
};
/* This must match up with the value previously used for execbuf2.rsvd1. */
-#define DEFAULT_CONTEXT_ID 0
+#define DEFAULT_CONTEXT_HANDLE 0
+/**
+ * struct intel_context - as the name implies, represents a context.
+ * @ref: reference count.
+ * @user_handle: userspace tracking identity for this context.
+ * @remap_slice: l3 row remapping information.
+ * @file_priv: filp associated with this context (NULL for global default
+ * context).
+ * @hang_stats: information about the role of this context in possible GPU
+ * hangs.
+ * @vm: virtual memory space used by this context.
+ * @legacy_hw_ctx: render context backing object and whether it is correctly
+ * initialized (legacy ring submission mechanism only).
+ * @link: link in the global list of contexts.
+ *
+ * Contexts are memory images used by the hardware to store copies of their
+ * internal state.
+ */
struct intel_context {
struct kref ref;
- int id;
- bool is_initialized;
+ int user_handle;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
- struct intel_engine_cs *last_ring;
- struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
struct i915_address_space *vm;
+ struct {
+ struct drm_i915_gem_object *rcs_state;
+ bool initialized;
+ } legacy_hw_ctx;
+
struct list_head link;
};
struct i915_fbc {
unsigned long size;
+ unsigned threshold;
unsigned int fb_id;
enum plane plane;
int y;
- struct drm_mm_node *compressed_fb;
+ struct drm_mm_node compressed_fb;
struct drm_mm_node *compressed_llb;
struct intel_fbc_work {
@@ -635,9 +658,15 @@ struct i915_drrs {
struct intel_connector *connector;
};
+struct intel_dp;
struct i915_psr {
+ struct mutex lock;
bool sink_support;
bool source_ok;
+ struct intel_dp *enabled;
+ bool active;
+ struct delayed_work work;
+ unsigned busy_frontbuffer_bits;
};
enum intel_pch {
@@ -656,6 +685,7 @@ enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_BACKLIGHT_PRESENT (1<<3)
struct intel_fbdev;
struct intel_fbc_work;
@@ -879,6 +909,12 @@ struct vlv_s0ix_state {
u32 clock_gate_dis2;
};
+struct intel_rps_ei {
+ u32 cz_clock;
+ u32 render_c0;
+ u32 media_c0;
+};
+
struct intel_gen6_power_mgmt {
/* work and pm_iir are protected by dev_priv->irq_lock */
struct work_struct work;
@@ -902,6 +938,9 @@ struct intel_gen6_power_mgmt {
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/freqency */
u8 rp0_freq; /* Non-overclocked max frequency. */
+ u32 cz_freq;
+
+ u32 ei_interrupt_count;
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
@@ -909,6 +948,9 @@ struct intel_gen6_power_mgmt {
bool enabled;
struct delayed_work delayed_resume_work;
+ /* manual wa residency calculations */
+ struct intel_rps_ei up_ei, down_ei;
+
/*
* Protects RPS/RC6 register access and PCU communication.
* Must be taken after struct_mutex if nested.
@@ -1229,6 +1271,7 @@ struct intel_vbt_data {
u16 pwm_freq_hz;
bool present;
bool active_low_pwm;
+ u8 min_brightness; /* min_brightness/255 of max */
} backlight;
/* MIPI DSI */
@@ -1298,7 +1341,7 @@ struct ilk_wm_values {
*/
struct i915_runtime_pm {
bool suspended;
- bool irqs_disabled;
+ bool _irqs_disabled;
};
enum intel_pipe_crc_source {
@@ -1331,6 +1374,17 @@ struct intel_pipe_crc {
wait_queue_head_t wq;
};
+struct i915_frontbuffer_tracking {
+ struct mutex lock;
+
+ /*
+ * Tracking bits for delayed frontbuffer flushing due to gpu activity or
+ * scheduled flips.
+ */
+ unsigned busy_bits;
+ unsigned flip_bits;
+};
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1362,6 +1416,7 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
struct intel_engine_cs ring[I915_NUM_RINGS];
+ struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno;
drm_dma_handle_t *status_page_dmah;
@@ -1370,6 +1425,9 @@ struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+ /* protects the mmio flip data */
+ spinlock_t mmio_flip_lock;
+
bool display_irqs_enabled;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -1465,7 +1523,6 @@ struct drm_i915_private {
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
- struct intel_ddi_plls ddi_plls;
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
/* Reclocking support */
@@ -1473,6 +1530,9 @@ struct drm_i915_private {
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
+
+ struct i915_frontbuffer_tracking fb_tracking;
+
u16 orig_clock;
bool mchbar_need_disable;
@@ -1539,6 +1599,20 @@ struct drm_i915_private {
struct i915_runtime_pm pm;
+ struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
+ u32 long_hpd_port_mask;
+ u32 short_hpd_port_mask;
+ struct work_struct dig_port_work;
+
+ /*
+ * If we get an HPD irq from DP and an HPD irq from non-DP, the
+ * non-DP HPD work could block the shared workqueue while trying to
+ * grab a mode config mutex that userspace may already hold. Meanwhile
+ * userspace is waiting on the DP workqueue to run, which is blocked
+ * behind the non-DP work.
+ */
+ struct workqueue_struct *dp_wq;
+
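/*
 * Illustrative sketch, not part of the patch: HPD handling picks the
 * queue by event type so DP work is never stuck behind non-DP work.
 * The helper name is hypothetical; dig_port_work and hotplug_work are
 * existing i915 work items.
 */
static void example_queue_hpd(struct drm_i915_private *dev_priv,
			      bool is_dig_port_event)
{
	if (is_dig_port_event)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	else
		schedule_work(&dev_priv->hotplug_work);
}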
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
@@ -1590,6 +1664,28 @@ struct drm_i915_gem_object_ops {
void (*release)(struct drm_i915_gem_object *);
};
+/*
+ * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+ * considered to be the frontbuffer for the given plane interface-wise. This
+ * doesn't mean that the hw necessarily already scans it out, but that any
+ * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+ *
+ * We have one bit per pipe and per scanout plane type.
+ */
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
+#define INTEL_FRONTBUFFER_BITS \
+ (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
+#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
+ (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+#define INTEL_FRONTBUFFER_CURSOR(pipe) \
+ (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_SPRITE(pipe) \
+ (1 << (2 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+ (1 << (3 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
+ (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+
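/*
 * Illustrative worked example, not part of the patch: with 4 bits per
 * pipe, pipe B (pipe == 1) occupies bits 4..7 of obj->frontbuffer_bits:
 *
 *   INTEL_FRONTBUFFER_PRIMARY(1)  == 1 << 4   == 0x10
 *   INTEL_FRONTBUFFER_CURSOR(1)   == 1 << 5   == 0x20
 *   INTEL_FRONTBUFFER_SPRITE(1)   == 1 << 6   == 0x40
 *   INTEL_FRONTBUFFER_OVERLAY(1)  == 1 << 7   == 0x80
 *   INTEL_FRONTBUFFER_ALL_MASK(1) == 0xf << 4 == 0xf0
 */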
struct drm_i915_gem_object {
struct drm_gem_object base;
@@ -1660,6 +1756,12 @@ struct drm_i915_gem_object {
unsigned int pin_display:1;
/*
+ * Is the object to be mapped as read-only to the GPU?
+ * Only honoured if the hardware has the relevant pte bit.
+ */
+ unsigned long gt_ro:1;
+
+ /*
* Is the GPU currently using a fence to access this buffer,
*/
unsigned int pending_fenced_gpu_access:1;
@@ -1671,6 +1773,8 @@ struct drm_i915_gem_object {
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
+ unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+
struct sg_table *pages;
int pages_pin_count;
@@ -1717,6 +1821,10 @@ struct drm_i915_gem_object {
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+ struct drm_i915_gem_object *new,
+ unsigned frontbuffer_bits);
+
/**
* Request queue structure.
*
@@ -1938,10 +2046,8 @@ struct drm_i915_cmd_table {
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \
- (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
-#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \
- && !IS_GEN8(dev))
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
@@ -1996,6 +2102,8 @@ struct drm_i915_cmd_table {
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
+
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
@@ -2038,6 +2146,8 @@ struct i915_params {
bool reset;
bool disable_display;
bool disable_vtd_wa;
+ int use_mmio_flip;
+ bool mmio_debug;
};
extern struct i915_params i915 __read_mostly;
@@ -2046,12 +2156,12 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
-extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
- struct drm_file *file_priv);
+ struct drm_file *file);
extern void i915_driver_postclose(struct drm_device *dev,
- struct drm_file *file_priv);
+ struct drm_file *file);
extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -2082,10 +2192,12 @@ extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
-extern void intel_uncore_early_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev,
+ bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
+extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -2233,6 +2345,8 @@ bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
+int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
@@ -2402,7 +2516,7 @@ static inline void i915_gem_context_unreference(struct intel_context *ctx)
static inline bool i915_gem_context_is_default(const struct intel_context *c)
{
- return c->id == DEFAULT_CONTEXT_ID;
+ return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -2433,7 +2547,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
@@ -2443,7 +2557,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 stolen_offset,
u32 gtt_offset,
u32 size);
-void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -2591,8 +2704,8 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
-extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
-extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
+extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
+ bool enable);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
@@ -2603,6 +2716,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+void intel_notify_mmio_flip(struct intel_engine_cs *ring);
+
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
@@ -2698,10 +2813,10 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
- if (HAS_PCH_SPLIT(dev))
- return CPU_VGACNTRL;
- else if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev))
return VLV_VGACNTRL;
+ else if (INTEL_INFO(dev)->gen >= 5)
+ return CPU_VGACNTRL;
else
return VGACNTRL;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d86b77e905a2..dcd8d7b42552 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1095,7 +1095,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
* Compare seqno against outstanding lazy request. Emit a request if they are
* equal.
*/
-static int
+int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
int ret;
@@ -1161,14 +1161,14 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
unsigned long timeout_expire;
int ret;
- WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
+ WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
- if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
+ if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
if (file_priv)
mod_delayed_work(dev_priv->wq,
@@ -1561,14 +1561,29 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto unpin;
- obj->fault_mappable = true;
-
+ /* Finally, remap it using the new GTT offset */
pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
pfn >>= PAGE_SHIFT;
- pfn += page_offset;
- /* Finally, remap it using the new GTT offset */
- ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ if (!obj->fault_mappable) {
+ unsigned long size = min_t(unsigned long,
+ vma->vm_end - vma->vm_start,
+ obj->base.size);
+ int i;
+
+ for (i = 0; i < size >> PAGE_SHIFT; i++) {
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vma->vm_start + i * PAGE_SIZE,
+ pfn + i);
+ if (ret)
+ break;
+ }
+
+ obj->fault_mappable = true;
+ } else
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vmf->virtual_address,
+ pfn + page_offset);
unpin:
i915_gem_object_ggtt_unpin(obj);
unlock:
@@ -1616,22 +1631,6 @@ out:
return ret;
}
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
-{
- struct i915_vma *vma;
-
- /*
- * Only the global gtt is relevant for gtt memory mappings, so restrict
- * list traversal to objects bound into the global address space. Note
- * that the active list should be empty, but better safe than sorry.
- */
- WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
- list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
- i915_gem_release_mmap(vma->obj);
- list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
- i915_gem_release_mmap(vma->obj);
-}
-
/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
@@ -1657,6 +1656,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
obj->fault_mappable = false;
}
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+ i915_gem_release_mmap(obj);
+}
+
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
@@ -2211,6 +2219,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
list_move_tail(&vma->mm_list, &vm->inactive_list);
}
+ intel_fb_obj_flush(obj, true);
+
list_del_init(&obj->ring_list);
obj->ring = NULL;
@@ -2320,7 +2330,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
u32 request_ring_position, request_start;
int ret;
- request_start = intel_ring_get_tail(ring);
+ request_start = intel_ring_get_tail(ring->buffer);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
@@ -2341,7 +2351,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- request_ring_position = intel_ring_get_tail(ring);
+ request_ring_position = intel_ring_get_tail(ring->buffer);
ret = ring->add_request(ring);
if (ret)
@@ -2832,6 +2842,8 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
idx = intel_ring_sync_index(from, to);
seqno = obj->last_read_seqno;
+ /* Optimization: Avoid semaphore sync when we are sure we already
+ * waited for an object with a higher seqno */
if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
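/*
 * Illustrative numbers, not from the patch: if the slot for this ring
 * pair last recorded sync_seqno == 120, an object whose last_read_seqno
 * is 100 needs no new semaphore wait, while one with seqno 130 still
 * does.
 */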
@@ -2915,8 +2927,6 @@ int i915_vma_unbind(struct i915_vma *vma)
vma->unbind_vma(vma);
- i915_gem_gtt_finish_object(obj);
-
list_del_init(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
if (i915_is_ggtt(vma->vm))
@@ -2927,8 +2937,10 @@ int i915_vma_unbind(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
- if (list_empty(&obj->vma_list))
+ if (list_empty(&obj->vma_list)) {
+ i915_gem_gtt_finish_object(obj);
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+ }
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
@@ -3540,6 +3552,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
+ intel_fb_obj_flush(obj, false);
+
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
@@ -3561,6 +3575,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
+ intel_fb_obj_flush(obj, false);
+
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
@@ -3614,6 +3630,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
obj->dirty = 1;
}
+ if (write)
+ intel_fb_obj_invalidate(obj, NULL);
+
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
@@ -3950,6 +3969,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
+ if (write)
+ intel_fb_obj_invalidate(obj, NULL);
+
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
@@ -4438,13 +4460,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->stolen)
i915_gem_object_unpin_pages(obj);
+ WARN_ON(obj->frontbuffer_bits);
+
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
if (discard_backing_storage(obj))
obj->madv = I915_MADV_DONTNEED;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
- i915_gem_object_release_stolen(obj);
BUG_ON(obj->pages);
@@ -4922,6 +4945,8 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
register_oom_notifier(&dev_priv->mm.oom_notifier);
+
+ mutex_init(&dev_priv->fb_tracking.lock);
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
@@ -4983,6 +5008,23 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
return ret;
}
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+ struct drm_i915_gem_object *new,
+ unsigned frontbuffer_bits)
+{
+ if (old) {
+ WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
+ WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
+ old->frontbuffer_bits &= ~frontbuffer_bits;
+ }
+
+ if (new) {
+ WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
+ WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
+ new->frontbuffer_bits |= frontbuffer_bits;
+ }
+}
+
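/*
 * Illustrative sketch, not part of the patch: a primary-plane flip on
 * @pipe hands the frontbuffer role from the old scanout object to the
 * new one (function and parameter names are hypothetical):
 */
static void example_flip_tracking(struct drm_i915_gem_object *old_obj,
				  struct drm_i915_gem_object *new_obj,
				  enum pipe pipe)
{
	/* Caller holds struct_mutex; the WARNs in i915_gem_track_fb()
	 * catch an inconsistent bit handover. */
	i915_gem_track_fb(old_obj, new_obj,
			  INTEL_FRONTBUFFER_PRIMARY(pipe));
}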
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
if (!mutex_is_locked(mutex))
@@ -5065,12 +5107,13 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
- BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (vma->vm == vm)
return vma->node.start;
}
+ WARN(1, "%s vma for this object not found.\n",
+ i915_is_ggtt(vm) ? "global" : "ppgtt");
return -1;
}
@@ -5151,8 +5194,11 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
bool was_interruptible;
bool unlock;
- while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
+ while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
schedule_timeout_killable(1);
+ if (fatal_signal_pending(current))
+ return NOTIFY_DONE;
+ }
if (timeout == 0) {
pr_err("Unable to purge GPU memory due lock contention.\n");
return NOTIFY_DONE;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a5ddf3bce9c3..3b99390e467a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -182,22 +182,50 @@ void i915_gem_context_free(struct kref *ctx_ref)
typeof(*ctx), ref);
struct i915_hw_ppgtt *ppgtt = NULL;
- if (ctx->obj) {
+ if (ctx->legacy_hw_ctx.rcs_state) {
/* We refcount even the aliasing PPGTT to keep the code symmetric */
- if (USES_PPGTT(ctx->obj->base.dev))
+ if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
ppgtt = ctx_to_ppgtt(ctx);
-
- /* XXX: Free up the object before tearing down the address space, in
- * case we're bound in the PPGTT */
- drm_gem_object_unreference(&ctx->obj->base);
}
if (ppgtt)
kref_put(&ppgtt->ref, ppgtt_release);
+ if (ctx->legacy_hw_ctx.rcs_state)
+ drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
list_del(&ctx->link);
kfree(ctx);
}
+static struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, size);
+ if (obj == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Try to make the context utilize L3 as well as LLC.
+ *
+ * On VLV we don't have L3 controls in the PTEs so we
+ * shouldn't touch the cache level, especially as that
+ * would make the object snooped which might have a
+ * negative performance impact.
+ */
+ if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+ /* Failure shouldn't ever happen this early */
+ if (WARN_ON(ret)) {
+ drm_gem_object_unreference(&obj->base);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return obj;
+}
+
static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
@@ -234,40 +262,26 @@ __create_hw_context(struct drm_device *dev,
list_add_tail(&ctx->link, &dev_priv->context_list);
if (dev_priv->hw_context_size) {
- ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
- if (ctx->obj == NULL) {
- ret = -ENOMEM;
+ struct drm_i915_gem_object *obj =
+ i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
goto err_out;
}
-
- /*
- * Try to make the context utilize L3 as well as LLC.
- *
- * On VLV we don't have L3 controls in the PTEs so we
- * shouldn't touch the cache level, especially as that
- * would make the object snooped which might have a
- * negative performance impact.
- */
- if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
- ret = i915_gem_object_set_cache_level(ctx->obj,
- I915_CACHE_L3_LLC);
- /* Failure shouldn't ever happen this early */
- if (WARN_ON(ret))
- goto err_out;
- }
+ ctx->legacy_hw_ctx.rcs_state = obj;
}
/* Default context will never have a file_priv */
if (file_priv != NULL) {
ret = idr_alloc(&file_priv->context_idr, ctx,
- DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
+ DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
} else
- ret = DEFAULT_CONTEXT_ID;
+ ret = DEFAULT_CONTEXT_HANDLE;
ctx->file_priv = file_priv;
- ctx->id = ret;
+ ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
@@ -301,7 +315,7 @@ i915_gem_create_context(struct drm_device *dev,
if (IS_ERR(ctx))
return ctx;
- if (is_global_default_ctx && ctx->obj) {
+ if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
@@ -309,7 +323,7 @@ i915_gem_create_context(struct drm_device *dev,
* be available. To avoid this we always pin the default
* context.
*/
- ret = i915_gem_obj_ggtt_pin(ctx->obj,
+ ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -349,8 +363,8 @@ i915_gem_create_context(struct drm_device *dev,
return ctx;
err_unpin:
- if (is_global_default_ctx && ctx->obj)
- i915_gem_object_ggtt_unpin(ctx->obj);
+ if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
+ i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
@@ -366,23 +380,27 @@ void i915_gem_context_reset(struct drm_device *dev)
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_context *dctx = ring->default_context;
+ struct intel_context *lctx = ring->last_context;
/* Do a fake switch to the default context */
- if (ring->last_context == dctx)
+ if (lctx == dctx)
continue;
- if (!ring->last_context)
+ if (!lctx)
continue;
- if (dctx->obj && i == RCS) {
- WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+ if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
+ WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0));
/* Fake a finish/inactive */
- dctx->obj->base.write_domain = 0;
- dctx->obj->active = 0;
+ dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
+ dctx->legacy_hw_ctx.rcs_state->active = 0;
}
- i915_gem_context_unreference(ring->last_context);
+ if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+ i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+
+ i915_gem_context_unreference(lctx);
i915_gem_context_reference(dctx);
ring->last_context = dctx;
}
@@ -429,7 +447,7 @@ void i915_gem_context_fini(struct drm_device *dev)
struct intel_context *dctx = dev_priv->ring[RCS].default_context;
int i;
- if (dctx->obj) {
+ if (dctx->legacy_hw_ctx.rcs_state) {
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
@@ -444,13 +462,13 @@ void i915_gem_context_fini(struct drm_device *dev)
WARN_ON(!dev_priv->ring[RCS].last_context);
if (dev_priv->ring[RCS].last_context == dctx) {
/* Fake switch to NULL context */
- WARN_ON(dctx->obj->active);
- i915_gem_object_ggtt_unpin(dctx->obj);
+ WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
+ i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].last_context = NULL;
}
- i915_gem_object_ggtt_unpin(dctx->obj);
+ i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -570,7 +588,7 @@ mi_set_context(struct intel_engine_cs *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -602,16 +620,16 @@ static int do_switch(struct intel_engine_cs *ring,
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
- BUG_ON(from->obj == NULL);
- BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+ BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
+ BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
- if (from == to && from->last_ring == ring && !to->remap_slice)
+ if (from == to && !to->remap_slice)
return 0;
/* Trying to pin first makes error handling easier. */
if (ring == &dev_priv->ring[RCS]) {
- ret = i915_gem_obj_ggtt_pin(to->obj,
+ ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
get_context_alignment(ring->dev), 0);
if (ret)
return ret;
@@ -644,17 +662,17 @@ static int do_switch(struct intel_engine_cs *ring,
*
* XXX: We need a real interface to do this instead of trickery.
*/
- ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+ ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
if (ret)
goto unpin_out;
- if (!to->obj->has_global_gtt_mapping) {
- struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+ if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
&dev_priv->gtt.base);
- vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+ vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
}
- if (!to->is_initialized || i915_gem_context_is_default(to))
+ if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
ret = mi_set_context(ring, to, hw_flags);
@@ -680,8 +698,8 @@ static int do_switch(struct intel_engine_cs *ring,
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
- from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
+ from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -689,21 +707,20 @@ static int do_switch(struct intel_engine_cs *ring,
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
- from->obj->dirty = 1;
- BUG_ON(from->obj->ring != ring);
+ from->legacy_hw_ctx.rcs_state->dirty = 1;
+ BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
/* obj is kept alive until the next request by its active ref */
- i915_gem_object_ggtt_unpin(from->obj);
+ i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(from);
}
- uninitialized = !to->is_initialized && from == NULL;
- to->is_initialized = true;
+ uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+ to->legacy_hw_ctx.initialized = true;
done:
i915_gem_context_reference(to);
ring->last_context = to;
- to->last_ring = ring;
if (uninitialized) {
ret = i915_gem_render_state_init(ring);
@@ -715,7 +732,7 @@ done:
unpin_out:
if (ring->id == RCS)
- i915_gem_object_ggtt_unpin(to->obj);
+ i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
return ret;
}
@@ -736,7 +753,7 @@ int i915_switch_context(struct intel_engine_cs *ring,
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- if (to->obj == NULL) { /* We have the fake context */
+ if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
if (to != ring->last_context) {
i915_gem_context_reference(to);
if (ring->last_context)
@@ -774,7 +791,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- args->ctx_id = ctx->id;
+ args->ctx_id = ctx->user_handle;
DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
return 0;
@@ -788,7 +805,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct intel_context *ctx;
int ret;
- if (args->ctx_id == DEFAULT_CONTEXT_ID)
+ if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT;
ret = i915_mutex_lock_interruptible(dev);
@@ -801,7 +818,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(ctx);
}
- idr_remove(&ctx->file_priv->context_idr, ctx->id);
+ idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
i915_gem_context_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3a30133f93e8..2dd19da6b4b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -938,7 +938,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
- if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
+ if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
return ERR_PTR(-EINVAL);
ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -975,10 +975,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
- /* check for potential scanout */
- if (i915_gem_obj_ggtt_bound(obj) &&
- i915_gem_obj_to_ggtt(obj)->pin_count)
- intel_mark_fb_busy(obj, ring);
+
+ intel_fb_obj_invalidate(obj, ring);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1028,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0;
}
+static int
+legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
+ struct intel_engine_cs *ring,
+ struct intel_context *ctx,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct list_head *vmas,
+ struct drm_i915_gem_object *batch_obj,
+ u64 exec_start, u32 flags)
+{
+ struct drm_clip_rect *cliprects = NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 exec_len;
+ int instp_mode;
+ u32 instp_mask;
+ int i, ret = 0;
+
+ if (args->num_cliprects != 0) {
+ if (ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+ return -EINVAL;
+ }
+
+ if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+ DRM_DEBUG("execbuf with %u cliprects\n",
+ args->num_cliprects);
+ return -EINVAL;
+ }
+
+ cliprects = kcalloc(args->num_cliprects,
+ sizeof(*cliprects),
+ GFP_KERNEL);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (copy_from_user(cliprects,
+ to_user_ptr(args->cliprects_ptr),
+ sizeof(*cliprects)*args->num_cliprects)) {
+ ret = -EFAULT;
+ goto error;
+ }
+ } else {
+ if (args->DR4 == 0xffffffff) {
+ DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+ args->DR4 = 0;
+ }
+
+ if (args->DR1 || args->DR4 || args->cliprects_ptr) {
+ DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+ if (ret)
+ goto error;
+
+ ret = i915_switch_context(ring, ctx);
+ if (ret)
+ goto error;
+
+ instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ instp_mask = I915_EXEC_CONSTANTS_MASK;
+ switch (instp_mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+ case I915_EXEC_CONSTANTS_REL_SURFACE:
+ if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (instp_mode != dev_priv->relative_constants_mode) {
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("no rel constants on pre-gen4\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (INTEL_INFO(dev)->gen > 5 &&
+ instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+ DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* The HW changed the meaning on this bit on gen6 */
+ if (INTEL_INFO(dev)->gen >= 6)
+ instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+ }
+ break;
+ default:
+ DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (ring == &dev_priv->ring[RCS] &&
+ instp_mode != dev_priv->relative_constants_mode) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto error;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = instp_mode;
+ }
+
+ if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+ ret = i915_reset_gen7_sol_offsets(dev, ring);
+ if (ret)
+ goto error;
+ }
+
+ exec_len = args->batch_len;
+ if (cliprects) {
+ for (i = 0; i < args->num_cliprects; i++) {
+ ret = i915_emit_box(dev, &cliprects[i],
+ args->DR1, args->DR4);
+ if (ret)
+ goto error;
+
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
+ if (ret)
+ goto error;
+ }
+ } else {
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
+ if (ret)
+ return ret;
+ }
+
+ trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+ i915_gem_execbuffer_move_to_active(vmas, ring);
+ i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+
+error:
+ kfree(cliprects);
+ return ret;
+}
+
/**
* Find one BSD ring to dispatch the corresponding BSD command.
* The Ring ID is returned.
@@ -1087,14 +1242,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
- struct drm_clip_rect *cliprects = NULL;
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
- u64 exec_start = args->batch_start_offset, exec_len;
- u32 mask, flags;
- int ret, mode, i;
+ u64 exec_start = args->batch_start_offset;
+ u32 flags;
+ int ret;
bool need_relocs;
if (!i915_gem_check_execbuffer(args))
@@ -1106,7 +1260,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
flags = 0;
if (args->flags & I915_EXEC_SECURE) {
- if (!file->is_master || !capable(CAP_SYS_ADMIN))
+ if (!drm_is_master(file) || !capable(CAP_SYS_ADMIN))
return -EPERM;
flags |= I915_DISPATCH_SECURE;
@@ -1138,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
- mode = args->flags & I915_EXEC_CONSTANTS_MASK;
- mask = I915_EXEC_CONSTANTS_MASK;
- switch (mode) {
- case I915_EXEC_CONSTANTS_REL_GENERAL:
- case I915_EXEC_CONSTANTS_ABSOLUTE:
- case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (mode != 0 && ring != &dev_priv->ring[RCS]) {
- DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
- return -EINVAL;
- }
-
- if (mode != dev_priv->relative_constants_mode) {
- if (INTEL_INFO(dev)->gen < 4) {
- DRM_DEBUG("no rel constants on pre-gen4\n");
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev)->gen > 5 &&
- mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
- DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
- return -EINVAL;
- }
-
- /* The HW changed the meaning on this bit on gen6 */
- if (INTEL_INFO(dev)->gen >= 6)
- mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
- }
- break;
- default:
- DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
- return -EINVAL;
- }
-
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
- if (args->num_cliprects != 0) {
- if (ring != &dev_priv->ring[RCS]) {
- DRM_DEBUG("clip rectangles are only valid with the render ring\n");
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev)->gen >= 5) {
- DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
- return -EINVAL;
- }
-
- if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
- DRM_DEBUG("execbuf with %u cliprects\n",
- args->num_cliprects);
- return -EINVAL;
- }
-
- cliprects = kcalloc(args->num_cliprects,
- sizeof(*cliprects),
- GFP_KERNEL);
- if (cliprects == NULL) {
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- if (copy_from_user(cliprects,
- to_user_ptr(args->cliprects_ptr),
- sizeof(*cliprects)*args->num_cliprects)) {
- ret = -EFAULT;
- goto pre_mutex_err;
- }
- } else {
- if (args->DR4 == 0xffffffff) {
- DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
- args->DR4 = 0;
- }
-
- if (args->DR1 || args->DR4 || args->cliprects_ptr) {
- DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
- return -EINVAL;
- }
- }
-
intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev);
@@ -1291,7 +1369,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = i915_parse_cmds(ring,
batch_obj,
args->batch_start_offset,
- file->is_master);
+ drm_is_master(file));
if (ret)
goto err;
@@ -1322,63 +1400,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
else
exec_start += i915_gem_obj_offset(batch_obj, vm);
- ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
+ ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
+ args, &eb->vmas, batch_obj, exec_start, flags);
if (ret)
goto err;
- ret = i915_switch_context(ring, ctx);
- if (ret)
- goto err;
-
- if (ring == &dev_priv->ring[RCS] &&
- mode != dev_priv->relative_constants_mode) {
- ret = intel_ring_begin(ring, 4);
- if (ret)
- goto err;
-
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, INSTPM);
- intel_ring_emit(ring, mask << 16 | mode);
- intel_ring_advance(ring);
-
- dev_priv->relative_constants_mode = mode;
- }
-
- if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
- ret = i915_reset_gen7_sol_offsets(dev, ring);
- if (ret)
- goto err;
- }
-
-
- exec_len = args->batch_len;
- if (cliprects) {
- for (i = 0; i < args->num_cliprects; i++) {
- ret = i915_emit_box(dev, &cliprects[i],
- args->DR1, args->DR4);
- if (ret)
- goto err;
-
- ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len,
- flags);
- if (ret)
- goto err;
- }
- } else {
- ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len,
- flags);
- if (ret)
- goto err;
- }
-
- trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
-
- i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
- i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
-
err:
/* the request owns the ref now */
i915_gem_context_unreference(ctx);
@@ -1387,8 +1413,6 @@ err:
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
- kfree(cliprects);
-
/* intel_gpu_busy should also get a ref, so it will free when the device
* is really idle. */
intel_runtime_pm_put(dev_priv);
@@ -1525,7 +1549,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- struct drm_i915_gem_exec_object2 *user_exec_list =
+ struct drm_i915_gem_exec_object2 __user *user_exec_list =
to_user_ptr(args->buffers_ptr);
int i;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8b3cde703364..5188936bca0a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -63,6 +63,13 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
}
#endif
+ /* Early VLV doesn't have this */
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+ dev->pdev->revision < 0xb) {
+ DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
+ return 0;
+ }
+
return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
}
@@ -110,7 +117,7 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -132,7 +139,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -156,7 +163,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 flags)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -164,7 +171,8 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
/* Mark the page as writeable. Other platforms don't have a
* setting for read-only/writable, so this matches that behavior.
*/
- pte |= BYT_PTE_WRITEABLE;
+ if (!(flags & PTE_READ_ONLY))
+ pte |= BYT_PTE_WRITEABLE;
if (level != I915_CACHE_NONE)
pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
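Note: Baytrail is the only PTE format in this file with a writable bit, so only byt_pte_encode() actually consults PTE_READ_ONLY; the other encoders take the flags word purely to keep the function-pointer type uniform. A standalone sketch of a flags-aware encoder (the bit layout here is made up, not Baytrail's):

#include <stdint.h>

#define PTE_VALID       (1ull << 0)
#define PTE_WRITEABLE   (1ull << 1)
#define FLAG_READ_ONLY  (1u << 1)	/* caller-side flag, as in the patch */

static uint64_t pte_encode(uint64_t addr, int valid, uint32_t flags)
{
	uint64_t pte = valid ? PTE_VALID : 0;

	pte |= addr & ~0xfffull;	/* page-aligned address bits */
	if (!(flags & FLAG_READ_ONLY))
		pte |= PTE_WRITEABLE;	/* the default stays writable */
	return pte;
}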
@@ -174,7 +182,7 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -187,7 +195,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -301,7 +309,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level, u32 unused)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
@@ -639,7 +647,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t pd_entry;
int pte, pde;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
@@ -941,7 +949,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -964,7 +972,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level, u32 flags)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
@@ -981,7 +989,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
- cache_level, true);
+ cache_level, true, flags);
+
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
@@ -1218,8 +1227,12 @@ ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
+ /* Currently applicable only to VLV */
+ if (vma->obj->gt_ro)
+ flags |= PTE_READ_ONLY;
+
vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
- cache_level);
+ cache_level, flags);
}
static void ppgtt_unbind_vma(struct i915_vma *vma)
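Note: the object-level gt_ro bit is folded into the PTE flags at bind time, so nothing above the VMA layer ever touches PTE bits directly. A sketch of that layering, with illustrative types (not the driver's):

#include <stdint.h>

#define FLAG_READ_ONLY (1u << 1)	/* mirrors the new PTE_READ_ONLY */

struct obj { int gt_ro; };
struct vma { struct obj *obj; };

static void bind(struct vma *vma, uint32_t flags,
                 void (*insert)(struct vma *, uint32_t))
{
	if (vma->obj->gt_ro)	/* currently applicable only to VLV */
		flags |= FLAG_READ_ONLY;
	insert(vma, flags);	/* PTE bits never leak above this layer */
}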
@@ -1394,7 +1407,7 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
- enum i915_cache_level level)
+ enum i915_cache_level level, u32 unused)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
@@ -1440,7 +1453,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
- enum i915_cache_level level)
+ enum i915_cache_level level, u32 flags)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
@@ -1452,7 +1465,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
+ iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
i++;
}
@@ -1464,7 +1477,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) !=
- vm->pte_encode(addr, level, true));
+ vm->pte_encode(addr, level, true, flags));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -1518,7 +1531,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
@@ -1567,6 +1580,10 @@ static void ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
+ /* Currently applicable only to VLV */
+ if (obj->gt_ro)
+ flags |= PTE_READ_ONLY;
+
/* If there is no aliasing PPGTT, or the caller needs a global mapping,
* or we have a global mapping already but the cacheability flags have
* changed, set the global PTEs.
@@ -1583,7 +1600,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
(cache_level != obj->cache_level)) {
vma->vm->insert_entries(vma->vm, obj->pages,
vma->node.start,
- cache_level);
+ cache_level, flags);
obj->has_global_gtt_mapping = 1;
}
}
@@ -1595,7 +1612,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
appgtt->base.insert_entries(&appgtt->base,
vma->obj->pages,
vma->node.start,
- cache_level);
+ cache_level, flags);
vma->obj->has_aliasing_ppgtt_mapping = 1;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 1b96a06be3cb..8d6f7c18c404 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -154,6 +154,7 @@ struct i915_vma {
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
#define GLOBAL_BIND (1<<0)
+#define PTE_READ_ONLY (1<<1)
void (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
@@ -197,7 +198,7 @@ struct i915_address_space {
/* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
- bool valid); /* Create a valid PTE */
+ bool valid, u32 flags); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
@@ -205,7 +206,7 @@ struct i915_address_space {
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
- enum i915_cache_level cache_level);
+ enum i915_cache_level cache_level, u32 flags);
void (*cleanup)(struct i915_address_space *vm);
};
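Note: both vtable entries grow a u32 flags argument, which forces every implementation to change in lockstep even where the argument is ignored (hence all the "unused" parameters in the hunks above). A sketch of the resulting shape, with illustrative types:

#include <stdint.h>

struct addr_space {
	/* both entries carry the new u32 flags argument */
	uint64_t (*pte_encode)(uint64_t addr, int valid, uint32_t flags);
	void (*insert_entries)(struct addr_space *vm, uint64_t start,
	                       uint32_t flags);
};

/* an implementation that ignores flags still has to take the argument */
static uint64_t encode_no_flags(uint64_t addr, int valid, uint32_t unused)
{
	(void)unused;
	return valid ? (addr | 1) : 0;
}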
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 3521f998a178..e60be3f552a6 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,64 +28,13 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
-struct i915_render_state {
+struct render_state {
+ const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
- unsigned long ggtt_offset;
- void *batch;
- u32 size;
- u32 len;
+ u64 ggtt_offset;
+ int gen;
};
-static struct i915_render_state *render_state_alloc(struct drm_device *dev)
-{
- struct i915_render_state *so;
- struct page *page;
- int ret;
-
- so = kzalloc(sizeof(*so), GFP_KERNEL);
- if (!so)
- return ERR_PTR(-ENOMEM);
-
- so->obj = i915_gem_alloc_object(dev, 4096);
- if (so->obj == NULL) {
- ret = -ENOMEM;
- goto free;
- }
- so->size = 4096;
-
- ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
- if (ret)
- goto free_gem;
-
- BUG_ON(so->obj->pages->nents != 1);
- page = sg_page(so->obj->pages->sgl);
-
- so->batch = kmap(page);
- if (!so->batch) {
- ret = -ENOMEM;
- goto unpin;
- }
-
- so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
-
- return so;
-unpin:
- i915_gem_object_ggtt_unpin(so->obj);
-free_gem:
- drm_gem_object_unreference(&so->obj->base);
-free:
- kfree(so);
- return ERR_PTR(ret);
-}
-
-static void render_state_free(struct i915_render_state *so)
-{
- kunmap(so->batch);
- i915_gem_object_ggtt_unpin(so->obj);
- drm_gem_object_unreference(&so->obj->base);
- kfree(so);
-}
-
static const struct intel_renderstate_rodata *
render_state_get_rodata(struct drm_device *dev, const int gen)
{
@@ -101,98 +50,120 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
return NULL;
}
-static int render_state_setup(const int gen,
- const struct intel_renderstate_rodata *rodata,
- struct i915_render_state *so)
+static int render_state_init(struct render_state *so, struct drm_device *dev)
{
- const u64 goffset = i915_gem_obj_ggtt_offset(so->obj);
- u32 reloc_index = 0;
- u32 * const d = so->batch;
- unsigned int i = 0;
int ret;
- if (!rodata || rodata->batch_items * 4 > so->size)
+ so->gen = INTEL_INFO(dev)->gen;
+ so->rodata = render_state_get_rodata(dev, so->gen);
+ if (so->rodata == NULL)
+ return 0;
+
+ if (so->rodata->batch_items * 4 > 4096)
return -EINVAL;
+ so->obj = i915_gem_alloc_object(dev, 4096);
+ if (so->obj == NULL)
+ return -ENOMEM;
+
+ ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
+ if (ret)
+ goto free_gem;
+
+ so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
+ return 0;
+
+free_gem:
+ drm_gem_object_unreference(&so->obj->base);
+ return ret;
+}
+
+static int render_state_setup(struct render_state *so)
+{
+ const struct intel_renderstate_rodata *rodata = so->rodata;
+ unsigned int i = 0, reloc_index = 0;
+ struct page *page;
+ u32 *d;
+ int ret;
+
ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
if (ret)
return ret;
+ page = sg_page(so->obj->pages->sgl);
+ d = kmap(page);
+
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
- if (reloc_index < rodata->reloc_items &&
- i * 4 == rodata->reloc[reloc_index]) {
-
- s += goffset & 0xffffffff;
-
- /* We keep batch offsets max 32bit */
- if (gen >= 8) {
+ if (i * 4 == rodata->reloc[reloc_index]) {
+ u64 r = s + so->ggtt_offset;
+ s = lower_32_bits(r);
+ if (so->gen >= 8) {
if (i + 1 >= rodata->batch_items ||
rodata->batch[i + 1] != 0)
return -EINVAL;
- d[i] = s;
- i++;
- s = (goffset & 0xffffffff00000000ull) >> 32;
+ d[i++] = s;
+ s = upper_32_bits(r);
}
reloc_index++;
}
- d[i] = s;
- i++;
+ d[i++] = s;
}
+ kunmap(page);
ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
if (ret)
return ret;
- if (rodata->reloc_items != reloc_index) {
- DRM_ERROR("not all relocs resolved, %d out of %d\n",
- reloc_index, rodata->reloc_items);
+ if (rodata->reloc[reloc_index] != -1) {
+ DRM_ERROR("only %d relocs resolved\n", reloc_index);
return -EINVAL;
}
- so->len = rodata->batch_items * 4;
-
return 0;
}
+static void render_state_fini(struct render_state *so)
+{
+ i915_gem_object_ggtt_unpin(so->obj);
+ drm_gem_object_unreference(&so->obj->base);
+}
+
int i915_gem_render_state_init(struct intel_engine_cs *ring)
{
- const int gen = INTEL_INFO(ring->dev)->gen;
- struct i915_render_state *so;
- const struct intel_renderstate_rodata *rodata;
+ struct render_state so;
int ret;
if (WARN_ON(ring->id != RCS))
return -ENOENT;
- rodata = render_state_get_rodata(ring->dev, gen);
- if (rodata == NULL)
- return 0;
+ ret = render_state_init(&so, ring->dev);
+ if (ret)
+ return ret;
- so = render_state_alloc(ring->dev);
- if (IS_ERR(so))
- return PTR_ERR(so);
+ if (so.rodata == NULL)
+ return 0;
- ret = render_state_setup(gen, rodata, so);
+ ret = render_state_setup(&so);
if (ret)
goto out;
ret = ring->dispatch_execbuffer(ring,
- i915_gem_obj_ggtt_offset(so->obj),
- so->len,
+ so.ggtt_offset,
+ so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring);
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, NULL, so->obj, NULL);
+ ret = __i915_add_request(ring, NULL, so.obj, NULL);
/* __i915_add_request moves object to inactive if it fails */
out:
- render_state_free(so);
+ render_state_fini(&so);
return ret;
}
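Note: the gen8 path of the rewritten relocation loop consumes two dwords per relocation: the lower 32 bits go in place, the upper 32 bits in the next slot, which the rodata must pre-zero (that is what the batch[i + 1] != 0 check rejects). A standalone sketch of the patch step, assuming the same pre-zeroed convention:

#include <stdint.h>

static int patch_reloc(uint32_t *batch, unsigned int i, int gen,
                       uint64_t ggtt_base)
{
	uint64_t r = batch[i] + ggtt_base;	/* base-relative target */

	batch[i] = (uint32_t)r;			/* lower_32_bits() */
	if (gen >= 8) {
		if (batch[i + 1] != 0)		/* slot must be pre-zeroed */
			return -1;
		batch[i + 1] = (uint32_t)(r >> 32);	/* upper_32_bits() */
	}
	return 0;
}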
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 62ef55ba061c..21c025a209c0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (base == 0)
return 0;
+ /* make sure we don't clobber the GTT if it's within stolen memory */
+ if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+ struct {
+ u32 start, end;
+ } stolen[2] = {
+ { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ };
+ u64 gtt_start, gtt_end;
+
+ gtt_start = I915_READ(PGTBL_CTL);
+ if (IS_GEN4(dev))
+ gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
+ (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+ else
+ gtt_start &= PGTBL_ADDRESS_LO_MASK;
+ gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+
+ if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
+ stolen[0].end = gtt_start;
+ if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
+ stolen[1].start = gtt_end;
+
+ /* pick the larger of the two chunks */
+ if (stolen[0].end - stolen[0].start >
+ stolen[1].end - stolen[1].start) {
+ base = stolen[0].start;
+ dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+ } else {
+ base = stolen[1].start;
+ dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+ }
+
+ if (stolen[0].start != stolen[1].start ||
+ stolen[0].end != stolen[1].end) {
+ DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
+ (unsigned long long) gtt_start,
+ (unsigned long long) gtt_end - 1);
+ DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
+ base, base + (u32) dev_priv->gtt.stolen_size - 1);
+ }
+ }
+
/* Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
@@ -103,30 +147,68 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return base;
}
-static int i915_setup_compression(struct drm_device *dev, int size)
+static int find_compression_threshold(struct drm_device *dev,
+ struct drm_mm_node *node,
+ int size,
+ int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+ int compression_threshold = 1;
int ret;
- compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
- if (!compressed_fb)
- goto err_llb;
+ /* HACK: This code depends on what we will do in *_enable_fbc. If that
+ * code changes, this code needs to change as well.
+ *
+	 * The enable_fbc code will attempt to use one of our two compression
+	 * thresholds; in that case, only one fallback remains.
+ */
- /* Try to over-allocate to reduce reallocations and fragmentation */
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+ /* Try to over-allocate to reduce reallocations and fragmentation. */
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
- if (ret)
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
- size >>= 1, 4096,
- DRM_MM_SEARCH_DEFAULT);
- if (ret)
+ if (ret == 0)
+ return compression_threshold;
+
+again:
+ /* HW's ability to limit the CFB is 1:4 */
+ if (compression_threshold > 4 ||
+ (fb_cpp == 2 && compression_threshold == 2))
+ return 0;
+
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
+ size >>= 1, 4096,
+ DRM_MM_SEARCH_DEFAULT);
+ if (ret && INTEL_INFO(dev)->gen <= 4) {
+ return 0;
+ } else if (ret) {
+ compression_threshold <<= 1;
+ goto again;
+ } else {
+ return compression_threshold;
+ }
+}
+
+static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *uninitialized_var(compressed_llb);
+ int ret;
+
+ ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
+ size, fb_cpp);
+ if (!ret)
goto err_llb;
+ else if (ret > 1) {
+ DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
+
+ }
+
+ dev_priv->fbc.threshold = ret;
if (HAS_PCH_SPLIT(dev))
- I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
@@ -140,13 +222,12 @@ static int i915_setup_compression(struct drm_device *dev, int size)
dev_priv->fbc.compressed_llb = compressed_llb;
I915_WRITE(FBC_CFB_BASE,
- dev_priv->mm.stolen_base + compressed_fb->start);
+ dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}
- dev_priv->fbc.compressed_fb = compressed_fb;
- dev_priv->fbc.size = size;
+ dev_priv->fbc.size = size / dev_priv->fbc.threshold;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
@@ -155,14 +236,13 @@ static int i915_setup_compression(struct drm_device *dev, int size)
err_fb:
kfree(compressed_llb);
- drm_mm_remove_node(compressed_fb);
+ drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
err_llb:
- kfree(compressed_fb);
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -175,7 +255,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
/* Release any current block */
i915_gem_stolen_cleanup_compression(dev);
- return i915_setup_compression(dev, size);
+ return i915_setup_compression(dev, size, fb_cpp);
}
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
@@ -185,10 +265,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
if (dev_priv->fbc.size == 0)
return;
- if (dev_priv->fbc.compressed_fb) {
- drm_mm_remove_node(dev_priv->fbc.compressed_fb);
- kfree(dev_priv->fbc.compressed_fb);
- }
+ drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
if (dev_priv->fbc.compressed_llb) {
drm_mm_remove_node(dev_priv->fbc.compressed_llb);
@@ -292,9 +369,20 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
kfree(obj->pages);
}
+
+static void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+ if (obj->stolen) {
+ drm_mm_remove_node(obj->stolen);
+ kfree(obj->stolen);
+ obj->stolen = NULL;
+ }
+}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
+ .release = i915_gem_object_release_stolen,
};
static struct drm_i915_gem_object *
@@ -452,13 +540,3 @@ err_out:
drm_gem_object_unreference(&obj->base);
return NULL;
}
-
-void
-i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
-{
- if (obj->stolen) {
- drm_mm_remove_node(obj->stolen);
- kfree(obj->stolen);
- obj->stolen = NULL;
- }
-}
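Note: freeing the stolen node moves from an exported helper into the object ops' new .release callback, so generic object teardown reaches it without the caller knowing the object is stolen-backed. A sketch of the callback pattern with illustrative types (free() standing in for drm_mm_remove_node() plus kfree()):

#include <stdlib.h>

struct obj;
struct obj_ops { void (*release)(struct obj *); };
struct obj { const struct obj_ops *ops; void *stolen_node; };

static void stolen_release(struct obj *o)
{
	free(o->stolen_node);	/* stand-in for drm_mm_remove_node + kfree */
	o->stolen_node = NULL;
}

static const struct obj_ops stolen_ops = { .release = stolen_release };

static void obj_free(struct obj *o)	/* generic path, backing-agnostic */
{
	if (o->ops->release)
		o->ops->release(o);
}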
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 21ea92886a56..fe69fc837d9e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -40,19 +40,87 @@ struct i915_mmu_notifier {
struct hlist_node node;
struct mmu_notifier mn;
struct rb_root objects;
+ struct list_head linear;
struct drm_device *dev;
struct mm_struct *mm;
struct work_struct work;
unsigned long count;
unsigned long serial;
+ bool has_linear;
};
struct i915_mmu_object {
struct i915_mmu_notifier *mmu;
struct interval_tree_node it;
+ struct list_head link;
struct drm_i915_gem_object *obj;
+ bool is_linear;
};
+static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ unsigned long end;
+
+ mutex_lock(&dev->struct_mutex);
+ /* Cancel any active worker and force us to re-evaluate gup */
+ obj->userptr.work = NULL;
+
+ if (obj->pages != NULL) {
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_vma *vma, *tmp;
+ bool was_interruptible;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+ int ret = i915_vma_unbind(vma);
+ WARN_ON(ret && ret != -EIO);
+ }
+ WARN_ON(i915_gem_object_put_pages(obj));
+
+ dev_priv->mm.interruptible = was_interruptible;
+ }
+
+ end = obj->userptr.ptr + obj->base.size;
+
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+ return end;
+}
+
+static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct i915_mmu_object *mmu;
+ unsigned long serial;
+
+restart:
+ serial = mn->serial;
+ list_for_each_entry(mmu, &mn->linear, link) {
+ struct drm_i915_gem_object *obj;
+
+ if (mmu->it.last < start || mmu->it.start > end)
+ continue;
+
+ obj = mmu->obj;
+ drm_gem_object_reference(&obj->base);
+ spin_unlock(&mn->lock);
+
+ cancel_userptr(obj);
+
+ spin_lock(&mn->lock);
+ if (serial != mn->serial)
+ goto restart;
+ }
+
+ return NULL;
+}
+
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct mm_struct *mm,
unsigned long start,
@@ -60,16 +128,18 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
{
struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
struct interval_tree_node *it = NULL;
+ unsigned long next = start;
unsigned long serial = 0;
end--; /* interval ranges are inclusive, but invalidate range is exclusive */
- while (start < end) {
- struct drm_i915_gem_object *obj;
+ while (next < end) {
+ struct drm_i915_gem_object *obj = NULL;
- obj = NULL;
spin_lock(&mn->lock);
- if (serial == mn->serial)
- it = interval_tree_iter_next(it, start, end);
+ if (mn->has_linear)
+ it = invalidate_range__linear(mn, mm, start, end);
+ else if (serial == mn->serial)
+ it = interval_tree_iter_next(it, next, end);
else
it = interval_tree_iter_first(&mn->objects, start, end);
if (it != NULL) {
@@ -81,31 +151,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
if (obj == NULL)
return;
- mutex_lock(&mn->dev->struct_mutex);
- /* Cancel any active worker and force us to re-evaluate gup */
- obj->userptr.work = NULL;
-
- if (obj->pages != NULL) {
- struct drm_i915_private *dev_priv = to_i915(mn->dev);
- struct i915_vma *vma, *tmp;
- bool was_interruptible;
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
- int ret = i915_vma_unbind(vma);
- WARN_ON(ret && ret != -EIO);
- }
- WARN_ON(i915_gem_object_put_pages(obj));
-
- dev_priv->mm.interruptible = was_interruptible;
- }
-
- start = obj->userptr.ptr + obj->base.size;
-
- drm_gem_object_unreference(&obj->base);
- mutex_unlock(&mn->dev->struct_mutex);
+ next = cancel_userptr(obj);
}
}
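Note: overlapping userptr ranges cannot live in the interval tree, so they go on the new linear list; the walk has to drop the spinlock to cancel each object, and restarts from the head whenever the notifier's serial shows the list mutated underneath it. A sketch of the restart-on-serial pattern (range filtering omitted; userspace types):

struct node { struct node *next; int stale; };
struct notifier { struct node *head; unsigned long serial; };

/* stands in for cancel_userptr(); the real one drops mn->lock to sleep */
static void cancel(struct node *n) { n->stale = 1; }

static void invalidate_all(struct notifier *mn)
{
	unsigned long serial;
	struct node *n;
restart:
	serial = mn->serial;
	for (n = mn->head; n; n = n->next) {
		cancel(n);		/* lock dropped: list may mutate */
		if (serial != mn->serial)
			goto restart;	/* serial bumped: walk again */
	}
}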
@@ -150,7 +196,9 @@ i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
mmu->mm = mm;
mmu->objects = RB_ROOT;
mmu->count = 0;
- mmu->serial = 0;
+ mmu->serial = 1;
+ INIT_LIST_HEAD(&mmu->linear);
+ mmu->has_linear = false;
/* Protected by mmap_sem (write-lock) */
ret = __mmu_notifier_register(&mmu->mn, mm);
@@ -197,6 +245,17 @@ static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
mmu->serial = 1;
}
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
+{
+ struct i915_mmu_object *mn;
+
+ list_for_each_entry(mn, &mmu->linear, link)
+ if (mn->is_linear)
+ return true;
+
+ return false;
+}
+
static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
struct i915_mmu_object *mn)
@@ -204,7 +263,11 @@ i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
lockdep_assert_held(&mmu->dev->struct_mutex);
spin_lock(&mmu->lock);
- interval_tree_remove(&mn->it, &mmu->objects);
+ list_del(&mn->link);
+ if (mn->is_linear)
+ mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
+ else
+ interval_tree_remove(&mn->it, &mmu->objects);
__i915_mmu_notifier_update_serial(mmu);
spin_unlock(&mmu->lock);
@@ -230,7 +293,6 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
*/
i915_gem_retire_requests(mmu->dev);
- /* Disallow overlapping userptr objects */
spin_lock(&mmu->lock);
it = interval_tree_iter_first(&mmu->objects,
mn->it.start, mn->it.last);
@@ -243,14 +305,22 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
* to flush their object references upon which the object will
* be removed from the interval-tree, or the range is
* still in use by another client and the overlap is invalid.
+ *
+ * If we do have an overlap, we cannot use the interval tree
+ * for fast range invalidation.
*/
obj = container_of(it, struct i915_mmu_object, it)->obj;
- ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
- } else {
+ if (!obj->userptr.workers)
+ mmu->has_linear = mn->is_linear = true;
+ else
+ ret = -EAGAIN;
+ } else
interval_tree_insert(&mn->it, &mmu->objects);
+
+ if (ret == 0) {
+ list_add(&mn->link, &mmu->linear);
__i915_mmu_notifier_update_serial(mmu);
- ret = 0;
}
spin_unlock(&mmu->lock);
mutex_unlock(&mmu->dev->struct_mutex);
@@ -611,12 +681,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
* We impose several restrictions upon the memory being mapped
* into the GPU.
* 1. It must be page aligned (both start/end addresses, i.e ptr and size).
- * 2. It cannot overlap any other userptr object in the same address space.
- * 3. It must be normal system memory, not a pointer into another map of IO
+ * 2. It must be normal system memory, not a pointer into another map of IO
* space (e.g. it must not be a GTT mmapping of another object).
- * 4. We only allow a bo as large as we could in theory map into the GTT,
+ * 3. We only allow a bo as large as we could in theory map into the GTT,
* that is we limit the size to the total size of the GTT.
- * 5. The bo is marked as being snoopable. The backing pages are left
+ * 4. The bo is marked as being snoopable. The backing pages are left
* accessible directly by the CPU, but reads and writes by the GPU may
* incur the cost of a snoop (unless you have an LLC architecture).
*
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 66cf41765bf9..0b3f69439451 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -327,6 +327,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
struct drm_device *dev = error_priv->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error;
+ struct drm_i915_error_object *obj;
int i, j, offset, elt;
int max_hangcheck_score;
@@ -395,8 +396,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
error->pinned_bo_count[0]);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- struct drm_i915_error_object *obj;
-
obj = error->ring[i].batchbuffer;
if (obj) {
err_puts(m, dev_priv->ring[i].name);
@@ -459,6 +458,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
+ if ((obj = error->semaphore_obj)) {
+ err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
+ for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+ err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+ elt * 4,
+ obj->pages[0][elt],
+ obj->pages[0][elt+1],
+ obj->pages[0][elt+2],
+ obj->pages[0][elt+3]);
+ }
+ }
+
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
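Note: the semaphore page dump added above uses the same layout as the other object dumps: a byte offset followed by four dwords per row, covering the whole page (PAGE_SIZE/16 rows of 16 bytes). A standalone sketch of that format:

#include <stdio.h>
#include <stdint.h>

static void dump_page(const uint32_t *p, size_t ndwords)
{
	/* a full 4096-byte page is ndwords = 1024, i.e. 256 rows */
	for (size_t elt = 0; elt + 3 < ndwords; elt += 4)
		printf("[%04zx] %08x %08x %08x %08x\n",
		       elt * 4, p[elt], p[elt + 1], p[elt + 2], p[elt + 3]);
}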
@@ -529,6 +540,7 @@ static void i915_error_state_free(struct kref *error_ref)
kfree(error->ring[i].requests);
}
+ i915_error_object_free(error->semaphore_obj);
kfree(error->active_bo);
kfree(error->overlay);
kfree(error->display);
@@ -746,7 +758,59 @@ static void i915_gem_record_fences(struct drm_device *dev,
}
}
+
+static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error,
+ struct intel_engine_cs *ring,
+ struct drm_i915_error_ring *ering)
+{
+ struct intel_engine_cs *to;
+ int i;
+
+ if (!i915_semaphore_is_enabled(dev_priv->dev))
+ return;
+
+ if (!error->semaphore_obj)
+ error->semaphore_obj =
+ i915_error_object_create(dev_priv,
+ dev_priv->semaphore_obj,
+ &dev_priv->gtt.base);
+
+ for_each_ring(to, dev_priv, i) {
+ int idx;
+ u16 signal_offset;
+ u32 *tmp;
+
+ if (ring == to)
+ continue;
+
+ signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4;
+ tmp = error->semaphore_obj->pages[0];
+ idx = intel_ring_sync_index(ring, to);
+
+ ering->semaphore_mboxes[idx] = tmp[signal_offset];
+ ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+ }
+}
+
+static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *ring,
+ struct drm_i915_error_ring *ering)
+{
+ ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
+ ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
+ ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
+ ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+
+ if (HAS_VEBOX(dev_priv->dev)) {
+ ering->semaphore_mboxes[2] =
+ I915_READ(RING_SYNC_2(ring->mmio_base));
+ ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+ }
+}
+
static void i915_record_ring_state(struct drm_device *dev,
+ struct drm_i915_error_state *error,
struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
@@ -755,18 +819,10 @@ static void i915_record_ring_state(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 6) {
ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
- ering->semaphore_mboxes[0]
- = I915_READ(RING_SYNC_0(ring->mmio_base));
- ering->semaphore_mboxes[1]
- = I915_READ(RING_SYNC_1(ring->mmio_base));
- ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
- ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
- }
-
- if (HAS_VEBOX(dev)) {
- ering->semaphore_mboxes[2] =
- I915_READ(RING_SYNC_2(ring->mmio_base));
- ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+ if (INTEL_INFO(dev)->gen >= 8)
+ gen8_record_semaphore_state(dev_priv, error, ring, ering);
+ else
+ gen6_record_semaphore_state(dev_priv, ring, ering);
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -871,6 +927,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *ring,
return;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!i915_gem_obj_ggtt_bound(obj))
+ continue;
+
if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
break;
@@ -895,7 +954,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].valid = true;
- i915_record_ring_state(dev, ring, &error->ring[i]);
+ i915_record_ring_state(dev, error, ring, &error->ring[i]);
request = i915_gem_find_active_request(ring);
if (request) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 69a7960c36bb..6ef9d6fabf80 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -136,7 +136,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
if ((dev_priv->irq_mask & mask) != 0) {
@@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (!intel_irqs_enabled(dev_priv))
return;
if ((dev_priv->irq_mask & mask) != mask) {
@@ -173,7 +173,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
{
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
dev_priv->gt_irq_mask &= ~interrupt_mask;
@@ -182,12 +182,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
POSTING_READ(GTIMR);
}
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
}
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, 0);
}
@@ -206,7 +206,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->pm_irq_mask;
@@ -220,12 +220,12 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
}
}
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
snb_update_pm_irq(dev_priv, mask, mask);
}
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
snb_update_pm_irq(dev_priv, mask, 0);
}
@@ -264,7 +264,7 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->pm_irq_mask;
@@ -278,12 +278,12 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
}
}
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
bdw_update_pm_irq(dev_priv, mask, mask);
}
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
bdw_update_pm_irq(dev_priv, mask, 0);
}
@@ -420,7 +420,7 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
I915_WRITE(SDEIMR, sdeimr);
@@ -1090,6 +1090,53 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
return true;
}
+static void i915_digport_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, dig_port_work);
+ unsigned long irqflags;
+ u32 long_port_mask, short_port_mask;
+ struct intel_digital_port *intel_dig_port;
+ int i, ret;
+ u32 old_bits = 0;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ long_port_mask = dev_priv->long_hpd_port_mask;
+ dev_priv->long_hpd_port_mask = 0;
+ short_port_mask = dev_priv->short_hpd_port_mask;
+ dev_priv->short_hpd_port_mask = 0;
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ bool valid = false;
+ bool long_hpd = false;
+ intel_dig_port = dev_priv->hpd_irq_port[i];
+ if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+ continue;
+
+ if (long_port_mask & (1 << i)) {
+ valid = true;
+ long_hpd = true;
+ } else if (short_port_mask & (1 << i))
+ valid = true;
+
+ if (valid) {
+ ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
+			if (ret) {
+				/* hpd_pulse returned true: fall back to old-school hpd */
+				old_bits |= (1 << intel_dig_port->base.hpd_pin);
+			}
+ }
+ }
+
+ if (old_bits) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dev_priv->hpd_event_bits |= old_bits;
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ schedule_work(&dev_priv->hotplug_work);
+ }
+}
+
/*
* Handle hotplug events outside the interrupt handler proper.
*/
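Note: the work function above follows a snapshot-then-process pattern: both port masks are copied and cleared inside one irq-safe critical section, hpd_pulse() is then called with the lock dropped, and any port that asks for the legacy path gets its pin OR-ed into old_bits so the old hotplug work is rescheduled. A sketch of the core pattern, with a pthread mutex standing in for the kernel spinlock and handle() as a hypothetical per-port callback:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t pending_mask;	/* set by the irq path, cleared here */

static void work_fn(void (*handle)(int port))
{
	uint32_t mask;
	int port;

	pthread_mutex_lock(&lock);
	mask = pending_mask;	/* snapshot under the lock */
	pending_mask = 0;	/* new events accumulate afresh */
	pthread_mutex_unlock(&lock);

	for (port = 0; port < 32; port++)	/* lock dropped: may sleep */
		if (mask & (1u << port))
			handle(port);
}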
@@ -1118,6 +1165,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
dev_priv->hpd_event_bits = 0;
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
+ if (!intel_connector->encoder)
+ continue;
intel_encoder = intel_connector->encoder;
if (intel_encoder->hpd_pin > HPD_NONE &&
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
@@ -1148,6 +1197,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
+ if (!intel_connector->encoder)
+ continue;
intel_encoder = intel_connector->encoder;
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
if (intel_encoder->hot_plug)
@@ -1214,10 +1265,138 @@ static void notify_ring(struct drm_device *dev,
trace_i915_gem_request_complete(ring);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_notify_mmio_flip(ring);
+
wake_up_all(&ring->irq_queue);
i915_queue_hangcheck(dev);
}
+static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
+ struct intel_rps_ei *rps_ei)
+{
+ u32 cz_ts, cz_freq_khz;
+ u32 render_count, media_count;
+ u32 elapsed_render, elapsed_media, elapsed_time;
+ u32 residency = 0;
+
+ cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+ cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
+
+ render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
+ media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
+
+ if (rps_ei->cz_clock == 0) {
+ rps_ei->cz_clock = cz_ts;
+ rps_ei->render_c0 = render_count;
+ rps_ei->media_c0 = media_count;
+
+ return dev_priv->rps.cur_freq;
+ }
+
+ elapsed_time = cz_ts - rps_ei->cz_clock;
+ rps_ei->cz_clock = cz_ts;
+
+ elapsed_render = render_count - rps_ei->render_c0;
+ rps_ei->render_c0 = render_count;
+
+ elapsed_media = media_count - rps_ei->media_c0;
+ rps_ei->media_c0 = media_count;
+
+	/* Convert all the counters into a common unit of milliseconds */
+ elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
+ elapsed_render /= cz_freq_khz;
+ elapsed_media /= cz_freq_khz;
+
+ /*
+ * Calculate overall C0 residency percentage
+	 * only if the elapsed time is non-zero
+ */
+ if (elapsed_time) {
+ residency =
+ ((max(elapsed_render, elapsed_media) * 100)
+ / elapsed_time);
+ }
+
+ return residency;
+}
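Note: vlv_c0_residency() works on deltas of free-running counters between two invocations: the busier of the render/media C0 deltas over the elapsed wall time, both scaled to milliseconds, yields a percentage; the very first call only seeds the baselines. A simplified standalone sketch of the math (inputs already converted to milliseconds, unlike the raw register units above):

#include <stdint.h>

struct ei { uint32_t t, render, media; int primed; };

static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

static uint32_t c0_residency(struct ei *s, uint32_t t_ms,
                             uint32_t render_ms, uint32_t media_ms)
{
	uint32_t dt, busy;

	if (!s->primed) {		/* first call: seed the baselines */
		*s = (struct ei){ t_ms, render_ms, media_ms, 1 };
		return 0;
	}
	dt = t_ms - s->t;
	busy = max_u32(render_ms - s->render, media_ms - s->media);
	*s = (struct ei){ t_ms, render_ms, media_ms, 1 };
	return dt ? busy * 100 / dt : 0;	/* percent C0 residency */
}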
+
+/**
+ * vlv_calc_delay_from_C0_counters - increase/decrease frequency based on
+ * GPU busyness calculated from the C0 counters of the render and media
+ * power wells
+ * @dev_priv: DRM device private
+ */
+static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
+{
+ u32 residency_C0_up = 0, residency_C0_down = 0;
+ u8 new_delay, adj;
+
+ dev_priv->rps.ei_interrupt_count++;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (dev_priv->rps.up_ei.cz_clock == 0) {
+ vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
+ vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
+ return dev_priv->rps.cur_freq;
+ }
+
+ /*
+ * To down throttle, C0 residency should be less than down threshold
+	 * for continuous EI intervals. So calculate down EI counters
+ * once in VLV_INT_COUNT_FOR_DOWN_EI
+ */
+ if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
+
+ dev_priv->rps.ei_interrupt_count = 0;
+
+ residency_C0_down = vlv_c0_residency(dev_priv,
+ &dev_priv->rps.down_ei);
+ } else {
+ residency_C0_up = vlv_c0_residency(dev_priv,
+ &dev_priv->rps.up_ei);
+ }
+
+ new_delay = dev_priv->rps.cur_freq;
+
+ adj = dev_priv->rps.last_adj;
+ /* C0 residency is greater than UP threshold. Increase Frequency */
+ if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
+ if (adj > 0)
+ adj *= 2;
+ else
+ adj = 1;
+
+ if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
+ new_delay = dev_priv->rps.cur_freq + adj;
+
+ /*
+ * For better performance, jump directly
+ * to RPe if we're below it.
+ */
+ if (new_delay < dev_priv->rps.efficient_freq)
+ new_delay = dev_priv->rps.efficient_freq;
+
+ } else if (!dev_priv->rps.ei_interrupt_count &&
+ (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
+ if (adj < 0)
+ adj *= 2;
+ else
+ adj = -1;
+ /*
+		 * This means C0 residency is less than the down threshold over
+		 * a period of VLV_INT_COUNT_FOR_DOWN_EI, so reduce the frequency
+ */
+ if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
+ new_delay = dev_priv->rps.cur_freq + adj;
+ }
+
+ return new_delay;
+}
+
static void gen6_pm_rps_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -1228,11 +1407,11 @@ static void gen6_pm_rps_work(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
- if (IS_BROADWELL(dev_priv->dev))
- bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ if (INTEL_INFO(dev_priv->dev)->gen >= 8)
+ gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
else {
/* Make sure not to corrupt PMIMR state used by ringbuffer */
- snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1248,8 +1427,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
adj *= 2;
- else
- adj = 1;
+ else {
+ /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
+ }
new_delay = dev_priv->rps.cur_freq + adj;
/*
@@ -1264,11 +1445,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
else
new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
+ new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
- else
- adj = -1;
+ else {
+ /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
+ }
new_delay = dev_priv->rps.cur_freq + adj;
} else { /* unknown event */
new_delay = dev_priv->rps.cur_freq;
@@ -1368,7 +1553,7 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+ gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1382,7 +1567,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
return;
spin_lock(&dev_priv->irq_lock);
- ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+ gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
spin_unlock(&dev_priv->irq_lock);
iir &= GT_PARITY_ERROR(dev);
@@ -1437,7 +1622,7 @@ static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1454,6 +1639,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(0));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(0), tmp);
ret = IRQ_HANDLED;
rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
@@ -1461,7 +1647,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
notify_ring(dev, &dev_priv->ring[RCS]);
if (bcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[BCS]);
- I915_WRITE(GEN8_GT_IIR(0), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
@@ -1469,6 +1654,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(1));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(1), tmp);
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
@@ -1476,7 +1662,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS2]);
- I915_WRITE(GEN8_GT_IIR(1), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
@@ -1484,10 +1669,10 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & GEN8_GT_PM_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(2));
if (tmp & dev_priv->pm_rps_events) {
- ret = IRQ_HANDLED;
- gen8_rps_irq_handler(dev_priv, tmp);
I915_WRITE(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events);
+ ret = IRQ_HANDLED;
+ gen8_rps_irq_handler(dev_priv, tmp);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
@@ -1495,11 +1680,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & GEN8_GT_VECS_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(3));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(3), tmp);
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VECS]);
- I915_WRITE(GEN8_GT_IIR(3), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
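Note: every GT IIR block above is now written back before its events are processed rather than after. The point of the reordering: with write-1-to-clear semantics, clearing first lets an edge that arrives during processing re-latch in IIR, whereas the old late write could wipe it. A minimal sketch of the ordering (reg is a hypothetical IIR-style register):

#include <stdint.h>

static volatile uint32_t reg;	/* write-1-to-clear identity register */

static void handle(uint32_t bits) { (void)bits; }

static void irq_block(void)
{
	uint32_t iir = reg;

	if (!iir)
		return;
	reg = iir;	/* clear FIRST: an edge during handle() re-latches */
	handle(iir);	/* clearing after handle() could wipe that edge */
}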
@@ -1510,23 +1695,104 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
+static int ilk_port_to_hotplug_shift(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ case PORT_E:
+ default:
+ return -1;
+ case PORT_B:
+ return 0;
+ case PORT_C:
+ return 8;
+ case PORT_D:
+ return 16;
+ }
+}
+
+static int g4x_port_to_hotplug_shift(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ case PORT_E:
+ default:
+ return -1;
+ case PORT_B:
+ return 17;
+ case PORT_C:
+ return 19;
+ case PORT_D:
+ return 21;
+ }
+}
+
+static inline enum port get_port_from_pin(enum hpd_pin pin)
+{
+ switch (pin) {
+ case HPD_PORT_B:
+ return PORT_B;
+ case HPD_PORT_C:
+ return PORT_C;
+ case HPD_PORT_D:
+ return PORT_D;
+ default:
+ return PORT_A; /* no hpd */
+ }
+}
+
static inline void intel_hpd_irq_handler(struct drm_device *dev,
u32 hotplug_trigger,
+ u32 dig_hotplug_reg,
const u32 *hpd)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
+ enum port port;
bool storm_detected = false;
+ bool queue_dig = false, queue_hp = false;
+ u32 dig_shift;
+ u32 dig_port_mask = 0;
if (!hotplug_trigger)
return;
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_trigger);
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg);
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
+ if (!(hpd[i] & hotplug_trigger))
+ continue;
+
+ port = get_port_from_pin(i);
+ if (port && dev_priv->hpd_irq_port[port]) {
+ bool long_hpd;
+
+ if (IS_G4X(dev)) {
+ dig_shift = g4x_port_to_hotplug_shift(port);
+ long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
+ } else {
+ dig_shift = ilk_port_to_hotplug_shift(port);
+ long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
+ }
+ DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
+			/* For long HPD pulses we want the digital queue to run,
+			 * but we still want HPD storm detection to function. */
+ if (long_hpd) {
+ dev_priv->long_hpd_port_mask |= (1 << port);
+ dig_port_mask |= hpd[i];
+ } else {
+ /* for short HPD just trigger the digital queue */
+ dev_priv->short_hpd_port_mask |= (1 << port);
+ hotplug_trigger &= ~hpd[i];
+ }
+ queue_dig = true;
+ }
+ }
+
+ for (i = 1; i < HPD_NUM_PINS; i++) {
if (hpd[i] & hotplug_trigger &&
dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
/*
@@ -1546,7 +1812,11 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
continue;
- dev_priv->hpd_event_bits |= (1 << i);
+ if (!(dig_port_mask & hpd[i])) {
+ dev_priv->hpd_event_bits |= (1 << i);
+ queue_hp = true;
+ }
+
if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
dev_priv->hpd_stats[i].hpd_last_jiffies
+ msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
@@ -1575,7 +1845,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
* queue for otherwise the flush_work in the pageflip code will
* deadlock.
*/
- schedule_work(&dev_priv->hotplug_work);
+ if (queue_dig)
+ queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
+ if (queue_hp)
+ schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
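Note: long/short classification reads a per-port field: on g4x the bits live in the trigger word itself, on ilk+ in the separate PCH digital hotplug register, at the shifts returned by the helpers above (B/C/D at 0/8/16 for ilk). A sketch of the shift-and-test, assuming the long-detect flag sits at bit 1 as PORTB_HOTPLUG_LONG_DETECT is used here:

#include <stdbool.h>
#include <stdint.h>

#define LONG_DETECT (1u << 1)	/* assumed bit position */

static int ilk_shift(int port)	/* ports B, C, D -> shifts 0, 8, 16 */
{
	static const int shift[] = { -1, 0, 8, 16 };

	return (port >= 1 && port <= 3) ? shift[port] : -1;
}

static bool is_long_pulse(uint32_t dig_hotplug_reg, int port)
{
	int s = ilk_shift(port);

	return s >= 0 && ((dig_hotplug_reg >> s) & LONG_DETECT);
}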
@@ -1696,7 +1969,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1805,26 +2078,28 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
- if (IS_G4X(dev)) {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+ if (hotplug_status) {
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ /*
+ * Make sure hotplug status is cleared before we clear IIR, or else we
+ * may miss hotplug events.
+ */
+ POSTING_READ(PORT_HOTPLUG_STAT);
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
- } else {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+ if (IS_G4X(dev)) {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
- }
+ intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
+ } else {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
- if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
- hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
- dp_aux_irq_handler(dev);
+ intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
+ }
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- /*
- * Make sure hotplug status is cleared before we clear IIR, or else we
- * may miss hotplug events.
- */
- POSTING_READ(PORT_HOTPLUG_STAT);
+ if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
+ hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+ dp_aux_irq_handler(dev);
+ }
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
@@ -1835,29 +2110,36 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
irqreturn_t ret = IRQ_NONE;
while (true) {
- iir = I915_READ(VLV_IIR);
+ /* Find, clear, then process each source of interrupt */
+
gt_iir = I915_READ(GTIIR);
+ if (gt_iir)
+ I915_WRITE(GTIIR, gt_iir);
+
pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir)
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+
+ iir = I915_READ(VLV_IIR);
+ if (iir) {
+ /* Consume port before clearing IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
+ I915_WRITE(VLV_IIR, iir);
+ }
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
goto out;
ret = IRQ_HANDLED;
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
-
- valleyview_pipestat_irq_handler(dev, iir);
-
- /* Consume port. Then clear IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT)
- i9xx_hpd_irq_handler(dev);
-
+ if (gt_iir)
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
-
- I915_WRITE(GTIIR, gt_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
- I915_WRITE(VLV_IIR, iir);
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ valleyview_pipestat_irq_handler(dev, iir);
}
out:
@@ -1878,21 +2160,27 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
if (master_ctl == 0 && iir == 0)
break;
+ ret = IRQ_HANDLED;
+
I915_WRITE(GEN8_MASTER_IRQ, 0);
- gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+ /* Find, clear, then process each source of interrupt */
- valleyview_pipestat_irq_handler(dev, iir);
+ if (iir) {
+ /* Consume port before clearing IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
+ I915_WRITE(VLV_IIR, iir);
+ }
- /* Consume port. Then clear IIR or we'll miss events */
- i9xx_hpd_irq_handler(dev);
+ gen8_gt_irq_handler(dev, dev_priv, master_ctl);
- I915_WRITE(VLV_IIR, iir);
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ valleyview_pipestat_irq_handler(dev, iir);
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
-
- ret = IRQ_HANDLED;
}
return ret;
@@ -1903,8 +2191,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
+ u32 dig_hotplug_reg;
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -2010,8 +2302,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+ intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2128,6 +2424,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
}
}
+/*
+ * To handle irqs with the minimum potential races with fresh interrupts, we:
+ * 1 - Disable Master Interrupt Control.
+ * 2 - Find the source(s) of the interrupt.
+ * 3 - Clear the Interrupt Identity bits (IIR).
+ * 4 - Process the interrupt(s) that had bits set in the IIRs.
+ * 5 - Re-enable Master Interrupt Control.
+ */
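Note: the five steps in the comment above map onto a small handler skeleton; a sketch follows, with all registers as hypothetical stand-ins for the master-control and IIR registers the real handler touches:

#include <stdint.h>

static volatile uint32_t master_ctl, iir;

static void process(uint32_t bits) { (void)bits; }

static int irq_handler(void)
{
	uint32_t saved = master_ctl;
	uint32_t pending;

	master_ctl = 0;			/* 1: disable master control   */

	pending = iir;			/* 2: find the source(s)       */
	if (pending) {
		iir = pending;		/* 3: clear identity bits      */
		process(pending);	/* 4: process what was latched */
	}

	master_ctl = saved;		/* 5: re-enable master control */
	return pending != 0;
}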
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -2155,32 +2459,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
POSTING_READ(SDEIER);
}
+ /* Find, clear, then process each source of interrupt */
+
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
+ I915_WRITE(GTIIR, gt_iir);
+ ret = IRQ_HANDLED;
if (INTEL_INFO(dev)->gen >= 6)
snb_gt_irq_handler(dev, dev_priv, gt_iir);
else
ilk_gt_irq_handler(dev, dev_priv, gt_iir);
- I915_WRITE(GTIIR, gt_iir);
- ret = IRQ_HANDLED;
}
de_iir = I915_READ(DEIIR);
if (de_iir) {
+ I915_WRITE(DEIIR, de_iir);
+ ret = IRQ_HANDLED;
if (INTEL_INFO(dev)->gen >= 7)
ivb_display_irq_handler(dev, de_iir);
else
ilk_display_irq_handler(dev, de_iir);
- I915_WRITE(DEIIR, de_iir);
- ret = IRQ_HANDLED;
}
if (INTEL_INFO(dev)->gen >= 6) {
u32 pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir) {
- gen6_rps_irq_handler(dev_priv, pm_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
+ gen6_rps_irq_handler(dev_priv, pm_iir);
}
}
@@ -2211,36 +2517,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
+ /* Find, clear, then process each source of interrupt */
+
ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
if (master_ctl & GEN8_DE_MISC_IRQ) {
tmp = I915_READ(GEN8_DE_MISC_IIR);
- if (tmp & GEN8_DE_MISC_GSE)
- intel_opregion_asle_intr(dev);
- else if (tmp)
- DRM_ERROR("Unexpected DE Misc interrupt\n");
- else
- DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
-
if (tmp) {
I915_WRITE(GEN8_DE_MISC_IIR, tmp);
ret = IRQ_HANDLED;
+ if (tmp & GEN8_DE_MISC_GSE)
+ intel_opregion_asle_intr(dev);
+ else
+ DRM_ERROR("Unexpected DE Misc interrupt\n");
}
+ else
+ DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
tmp = I915_READ(GEN8_DE_PORT_IIR);
- if (tmp & GEN8_AUX_CHANNEL_A)
- dp_aux_irq_handler(dev);
- else if (tmp)
- DRM_ERROR("Unexpected DE Port interrupt\n");
- else
- DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
-
if (tmp) {
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
+ if (tmp & GEN8_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+ else
+ DRM_ERROR("Unexpected DE Port interrupt\n");
}
+ else
+ DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
}
for_each_pipe(pipe) {
@@ -2250,33 +2556,32 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
continue;
pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
- if (pipe_iir & GEN8_PIPE_VBLANK)
- intel_pipe_handle_vblank(dev, pipe);
-
- if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ if (pipe_iir) {
+ ret = IRQ_HANDLED;
+ I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+ if (pipe_iir & GEN8_PIPE_VBLANK)
+ intel_pipe_handle_vblank(dev, pipe);
- if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev, pipe);
+ if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
+ }
- if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
- if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
- false))
- DRM_ERROR("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
- }
+ if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
+ hsw_pipe_crc_irq_handler(dev, pipe);
- if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
- DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
- pipe_name(pipe),
- pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
- }
+ if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
+ if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+ false))
+ DRM_ERROR("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
+ }
- if (pipe_iir) {
- ret = IRQ_HANDLED;
- I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+ if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+ DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+ pipe_name(pipe),
+ pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
+ }
} else
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
}
@@ -2288,13 +2593,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
* on older pch-split platforms. But this needs testing.
*/
u32 pch_iir = I915_READ(SDEIIR);
-
- cpt_irq_handler(dev, pch_iir);
-
if (pch_iir) {
I915_WRITE(SDEIIR, pch_iir);
ret = IRQ_HANDLED;
- }
+ cpt_irq_handler(dev, pch_iir);
+ } else
+ DRM_ERROR("The master control interrupt lied (SDE)!\n");
+
}
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -2749,12 +3054,7 @@ static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
if (INTEL_INFO(dev)->gen >= 8) {
- /*
- * FIXME: gen8 semaphore support - currently we don't emit
- * semaphores on bdw anyway, but this needs to be addressed when
- * we merge that code.
- */
- return false;
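+ /* On gen8+ the MI opcode sits in bits 28:23 of the header dword;
+ * 0x1c is MI_SEMAPHORE_WAIT (defined in i915_reg.h below). */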
+ return (ipehr >> 23) == 0x1c;
} else {
ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
@@ -2763,19 +3063,20 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
}
static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *signaller;
int i;
if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
- /*
- * FIXME: gen8 semaphore support - currently we don't emit
- * semaphores on bdw anyway, but this needs to be addressed when
- * we merge that code.
- */
- return NULL;
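+ /* gen8 semaphores wait on a GGTT address rather than on sync
+ * register bits, so find the ring whose signal slot for this
+ * waiter matches the decoded offset. */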
+ for_each_ring(signaller, dev_priv, i) {
+ if (ring == signaller)
+ continue;
+
+ if (offset == signaller->semaphore.signal_ggtt[ring->id])
+ return signaller;
+ }
} else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
@@ -2788,8 +3089,8 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
}
}
- DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
- ring->id, ipehr);
+ DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
+ ring->id, ipehr, offset);
return NULL;
}
@@ -2799,7 +3100,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 cmd, ipehr, head;
- int i;
+ u64 offset = 0;
+ int i, backwards;
ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
@@ -2808,13 +3110,15 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
/*
* HEAD is likely pointing to the dword after the actual command,
* so scan backwards until we find the MBOX. But limit it to just 3
- * dwords. Note that we don't care about ACTHD here since that might
+ * or 4 dwords depending on the semaphore wait command size.
+ * Note that we don't care about ACTHD here since that might
 * point at a batch, and semaphores are always emitted into the
* ringbuffer itself.
*/
head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
- for (i = 4; i; --i) {
+ for (i = backwards; i; --i) {
/*
* Be paranoid and presume the hw has gone off into the wild -
* our ring is smaller than what the hardware (and hence
@@ -2834,14 +3138,19 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
return NULL;
*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
- return semaphore_wait_to_signaller_ring(ring, ipehr);
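+ /* The gen8 MI_SEMAPHORE_WAIT packet carries a 64-bit GGTT address:
+ * low dword at head + 8, high dword at head + 12. */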
+ if (INTEL_INFO(ring->dev)->gen >= 8) {
+ offset = ioread32(ring->buffer->virtual_start + head + 12);
+ offset <<= 32;
+ offset |= ioread32(ring->buffer->virtual_start + head + 8);
+ }
+ return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
static int semaphore_passed(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *signaller;
- u32 seqno, ctl;
+ u32 seqno;
ring->hangcheck.deadlock++;
@@ -2853,15 +3162,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
return -1;
- /* cursory check for an unkickable deadlock */
- ctl = I915_READ_CTL(signaller);
- if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
- return -1;
-
if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
return 1;
- if (signaller->hangcheck.deadlock)
+ /* cursory check for an unkickable deadlock */
+ if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+ semaphore_passed(signaller) < 0)
return -1;
return 0;
@@ -3158,7 +3464,9 @@ static void gen8_irq_reset(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
for_each_pipe(pipe)
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
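+ /* Pipes whose power well is down have inaccessible IRQ registers;
+ * skip them here, they are re-initialized when the well comes up. */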
+ if (intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe)))
+ GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
GEN5_IRQ_RESET(GEN8_DE_PORT_);
GEN5_IRQ_RESET(GEN8_DE_MISC_);
@@ -3167,6 +3475,18 @@ static void gen8_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
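+/* Re-arm the pipe B/C interrupt registers after the display power well
+ * is re-enabled; their contents are lost while the well is down. */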
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
+ ~dev_priv->de_irq_mask[PIPE_B]);
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
+ ~dev_priv->de_irq_mask[PIPE_C]);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
static void cherryview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3491,8 +3811,11 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
for_each_pipe(pipe)
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
- de_pipe_enables);
+ if (intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe)))
+ GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ dev_priv->de_irq_mask[pipe],
+ de_pipe_enables);
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
}
@@ -4323,12 +4646,17 @@ void intel_irq_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
/* Let's track the enabled rps events */
- dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+ if (IS_VALLEYVIEW(dev))
+ /* WaGsvRC0ResidencyMethod:VLV */
+ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ else
+ dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
setup_timer(&dev_priv->gpu_error.hangcheck_timer,
i915_hangcheck_elapsed,
@@ -4338,6 +4666,9 @@ void intel_irq_init(struct drm_device *dev)
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+ /* Haven't installed the IRQ handler yet */
+ dev_priv->pm._irqs_disabled = true;
+
if (IS_GEN2(dev)) {
dev->max_vblank_count = 0;
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
@@ -4425,7 +4756,9 @@ void intel_hpd_init(struct drm_device *dev)
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct intel_connector *intel_connector = to_intel_connector(connector);
connector->polled = intel_connector->polled;
- if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+ if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
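+ /* MST connectors have no static encoder (hence the NULL check
+ * above) and signal hotplug through the port, so always use HPD. */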
+ if (intel_connector->mst_port)
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
@@ -4443,7 +4776,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
dev->driver->irq_uninstall(dev);
- dev_priv->pm.irqs_disabled = true;
+ dev_priv->pm._irqs_disabled = true;
}
/* Restore interrupts so we can recover from runtime PM. */
@@ -4451,7 +4784,7 @@ void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->pm.irqs_disabled = false;
+ dev_priv->pm._irqs_disabled = false;
dev->driver->irq_preinstall(dev);
dev->driver->irq_postinstall(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index d05a2afa17dc..62ee8308d682 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -37,7 +37,7 @@ struct i915_params i915 __read_mostly = {
.enable_fbc = -1,
.enable_hangcheck = true,
.enable_ppgtt = -1,
- .enable_psr = 0,
+ .enable_psr = 1,
.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
.disable_power_well = 1,
.enable_ips = 1,
@@ -48,6 +48,8 @@ struct i915_params i915 __read_mostly = {
.disable_display = 0,
.enable_cmd_parser = 1,
.disable_vtd_wa = 0,
+ .use_mmio_flip = 0,
+ .mmio_debug = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -117,7 +119,7 @@ MODULE_PARM_DESC(enable_ppgtt,
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
module_param_named(enable_psr, i915.enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: true)");
module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
@@ -156,3 +158,12 @@ MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)"
module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled [default], 0=disabled)");
+
+module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
+MODULE_PARM_DESC(use_mmio_flip,
+ "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
+
+module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
+MODULE_PARM_DESC(mmio_debug,
+ "Enable the MMIO debug code (default: false). This may negatively "
+ "affect performance.");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e691b30b2817..fe5c27630e95 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -29,8 +29,8 @@
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
-#define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c)
-#define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c)
+#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
+ (pipe) == PIPE_B ? (b) : (c))
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
@@ -240,7 +240,7 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
-#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20)
@@ -266,6 +266,11 @@
#define MI_RESTORE_EXT_STATE_EN (1<<2)
#define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0)
+#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
+#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
+#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
+#define MI_SEMAPHORE_POLL (1<<15)
+#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -360,6 +365,7 @@
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
@@ -525,10 +531,21 @@ enum punit_power_well {
#define PUNIT_REG_GPU_FREQ_STS 0xd8
#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
+#define PUNIT_REG_CZ_TIMESTAMP 0xce
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
+#define PUNIT_GPU_STATUS_REG 0xdb
+#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
+#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
+#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_SHIFT 8
+#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK 0xff
+
+#define PUNIT_GPU_DUTYCYCLE_REG 0xdf
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT 8
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK 0xff
+
#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c
#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3
#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8
@@ -540,6 +557,11 @@ enum punit_power_well {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
+#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
+#define VLV_RP_UP_EI_THRESHOLD 90
+#define VLV_RP_DOWN_EI_THRESHOLD 70
+#define VLV_INT_COUNT_FOR_DOWN_EI 5
+
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
#define CCK_FUSE_HPLL_FREQ_MASK 0x3
@@ -574,6 +596,11 @@ enum punit_power_well {
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
+#define DISPLAY_TRUNK_FORCE_ON (1 << 17)
+#define DISPLAY_TRUNK_FORCE_OFF (1 << 16)
+#define DISPLAY_FREQUENCY_STATUS (0x1f << 8)
+#define DISPLAY_FREQUENCY_STATUS_SHIFT 8
+#define DISPLAY_FREQUENCY_VALUES (0x1f << 0)
/**
* DOC: DPIO
@@ -761,6 +788,8 @@ enum punit_power_well {
#define _VLV_PCS_DW8_CH0 0x8220
#define _VLV_PCS_DW8_CH1 0x8420
+#define CHV_PCS_USEDCLKCHANNEL_OVRRIDE (1 << 20)
+#define CHV_PCS_USEDCLKCHANNEL (1 << 21)
#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
#define _VLV_PCS01_DW8_CH0 0x0220
@@ -869,6 +898,16 @@ enum punit_power_well {
#define DPIO_CHV_PROP_COEFF_SHIFT 0
#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
+#define _CHV_CMN_DW5_CH0 0x8114
+#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
+#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
+#define CHV_BUFRIGHTENA1_FORCE (3 << 20)
+#define CHV_BUFRIGHTENA1_MASK (3 << 20)
+#define CHV_BUFLEFTENA1_DISABLE (0 << 22)
+#define CHV_BUFLEFTENA1_NORMAL (1 << 22)
+#define CHV_BUFLEFTENA1_FORCE (3 << 22)
+#define CHV_BUFLEFTENA1_MASK (3 << 22)
+
#define _CHV_CMN_DW13_CH0 0x8134
#define _CHV_CMN_DW0_CH1 0x8080
#define DPIO_CHV_S1_DIV_SHIFT 21
@@ -883,8 +922,21 @@ enum punit_power_well {
#define _CHV_CMN_DW1_CH1 0x8084
#define DPIO_AFC_RECAL (1 << 14)
#define DPIO_DCLKP_EN (1 << 13)
+#define CHV_BUFLEFTENA2_DISABLE (0 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_NORMAL (1 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_FORCE (3 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_MASK (3 << 17) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_DISABLE (0 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_NORMAL (1 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_FORCE (3 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_MASK (3 << 19) /* CL2 DW1 only */
#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
+#define _CHV_CMN_DW19_CH0 0x814c
+#define _CHV_CMN_DW6_CH1 0x8098
+#define CHV_CMN_USEDCLKCHANNEL (1 << 13)
+#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
+
#define CHV_CMN_DW30 0x8178
#define DPIO_LRC_BYPASS (1 << 3)
@@ -933,6 +985,7 @@ enum punit_power_well {
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
+
/* control register for cpu gtt access */
#define TILECTL 0x101000
#define TILECTL_SWZCTL (1 << 0)
@@ -942,6 +995,9 @@ enum punit_power_well {
/*
* Instruction and interrupt control regs
*/
+#define PGTBL_CTL 0x02020
+#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
+#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER 0x02024
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
@@ -1167,6 +1223,8 @@ enum punit_power_well {
#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
+#define VLV_PCBR_ADDR_SHIFT 12
+
#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
#define EIR 0x020b0
#define EMR 0x020b4
@@ -1567,11 +1625,10 @@ enum punit_power_well {
/*
* Clock control & power management
*/
-#define DPLL_A_OFFSET 0x6014
-#define DPLL_B_OFFSET 0x6018
-#define CHV_DPLL_C_OFFSET 0x6030
-#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
- dev_priv->info.display_mmio_offset)
+#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
+#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
#define VGA0 0x6000
#define VGA1 0x6004
@@ -1659,11 +1716,10 @@ enum punit_power_well {
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
-#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
-#define CHV_DPLL_C_MD_OFFSET 0x603c
-#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
- dev_priv->info.display_mmio_offset)
+#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
+#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
+#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
@@ -2228,7 +2284,7 @@ enum punit_power_well {
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
-
+#define CHV_CLK_CTL1 0x101100
#define VLV_CLK_CTL2 0x101104
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
@@ -2373,6 +2429,7 @@ enum punit_power_well {
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
#define EDP_PSR_ENABLE (1<<31)
+#define BDW_PSR_SINGLE_FRAME (1<<30)
#define EDP_PSR_LINK_DISABLE (0<<27)
#define EDP_PSR_LINK_STANDBY (1<<27)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
@@ -2530,8 +2587,14 @@ enum punit_power_well {
#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
+#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19)
+#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
+#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17)
+#define PORTB_HOTPLUG_INT_SHORT_PULSE (1 << 17)
/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
@@ -2585,7 +2648,7 @@ enum punit_power_well {
#define PORT_DFT_I9XX 0x61150
#define DC_BALANCE_RESET (1 << 25)
-#define PORT_DFT2_G4X 0x61154
+#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
#define PIPE_B_SCRAMBLE_RESET (1 << 1)
@@ -4627,6 +4690,8 @@ enum punit_power_well {
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1<<19)
+#define GEN7_L3CNTLREG2 0xB020
+#define GEN7_L3CNTLREG3 0xB024
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
@@ -4873,8 +4938,7 @@ enum punit_power_well {
#define _PCH_TRANSA_LINK_M2 0xe0048
#define _PCH_TRANSA_LINK_N2 0xe004c
-/* Per-transcoder DIP controls */
-
+/* Per-transcoder DIP controls (PCH) */
#define _VIDEO_DIP_CTL_A 0xe0200
#define _VIDEO_DIP_DATA_A 0xe0208
#define _VIDEO_DIP_GCP_A 0xe0210
@@ -4887,6 +4951,7 @@ enum punit_power_well {
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+/* Per-transcoder DIP controls (VLV) */
#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
@@ -4895,12 +4960,19 @@ enum punit_power_well {
#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
+#define CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
+#define CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
+#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
+
#define VLV_TVIDEO_DIP_CTL(pipe) \
- _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+ _PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \
+ VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C)
#define VLV_TVIDEO_DIP_DATA(pipe) \
- _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+ _PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \
+ VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C)
#define VLV_TVIDEO_DIP_GCP(pipe) \
- _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+ _PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+ VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
/* Haswell DIP controls */
#define HSW_VIDEO_DIP_CTL_A 0x60200
@@ -5331,6 +5403,7 @@ enum punit_power_well {
#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
+#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
@@ -5468,6 +5541,12 @@ enum punit_power_well {
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define CHV_CZ_CLOCK_FREQ_MODE_200 200
+#define CHV_CZ_CLOCK_FREQ_MODE_267 267
+#define CHV_CZ_CLOCK_FREQ_MODE_320 320
+#define CHV_CZ_CLOCK_FREQ_MODE_333 333
+#define CHV_CZ_CLOCK_FREQ_MODE_400 400
+
#define GEN7_GT_SCRATCH_BASE 0x4F100
#define GEN7_GT_SCRATCH_REG_NUM 8
@@ -5478,6 +5557,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define VLV_COUNTER_CONTROL 0x138104
#define VLV_COUNT_RANGE_HIGH (1<<15)
+#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
+#define VLV_RENDER_RC0_COUNT_EN (1<<4)
#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
#define VLV_RENDER_RC6_COUNT_EN (1<<0)
#define GEN6_GT_GFX_RC6 0x138108
@@ -5486,6 +5567,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
+#define VLV_RENDER_C0_COUNT_REG 0x138118
+#define VLV_MEDIA_C0_COUNT_REG 0x13811C
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
@@ -5720,6 +5803,7 @@ enum punit_power_well {
#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_PORT_SHIFT 28
#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
#define TRANS_DDI_PORT_NONE (0<<28)
#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
@@ -5740,6 +5824,7 @@ enum punit_power_well {
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
#define TRANS_DDI_BFI_ENABLE (1<<4)
/* DisplayPort Transport Control */
@@ -5749,6 +5834,7 @@ enum punit_power_well {
#define DP_TP_CTL_ENABLE (1<<31)
#define DP_TP_CTL_MODE_SST (0<<27)
#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL_FORCE_ACT (1<<25)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
@@ -5763,15 +5849,19 @@ enum punit_power_well {
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
-#define DP_TP_STATUS_IDLE_DONE (1<<25)
-#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+#define DP_TP_STATUS_IDLE_DONE (1<<25)
+#define DP_TP_STATUS_ACT_SENT (1<<24)
+#define DP_TP_STATUS_MODE_STATUS_MST (1<<23)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
/* DDI Buffer Control */
#define DDI_BUF_CTL_A 0x64000
#define DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1<<31)
-/* Haswell */
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
@@ -5781,16 +5871,6 @@ enum punit_power_well {
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
-/* Broadwell */
-#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */
-#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */
-#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */
-#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */
-#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */
-#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */
-#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */
-#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */
-#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
@@ -5858,10 +5938,12 @@ enum punit_power_well {
/* WRPLL */
#define WRPLL_CTL1 0x46040
#define WRPLL_CTL2 0x46060
+#define WRPLL_CTL(pll) ((pll) == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
#define WRPLL_PLL_ENABLE (1<<31)
-#define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
-#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
+#define WRPLL_PLL_SSC (1<<28)
+#define WRPLL_PLL_NON_SSC (2<<28)
+#define WRPLL_PLL_LCPLL (3<<28)
+#define WRPLL_PLL_REF_MASK (3<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
#define WRPLL_DIVIDER_REF_MASK (0xff)
@@ -5880,6 +5962,7 @@ enum punit_power_well {
#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29)
#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
#define PORT_CLK_SEL_NONE (7<<29)
@@ -5921,7 +6004,10 @@ enum punit_power_well {
#define LCPLL_CD_SOURCE_FCLK (1<<21)
#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
-#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using I915_WRITE. */
+#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_BDW 0x138144
#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
#define D_COMP_COMP_FORCE (1<<8)
#define D_COMP_COMP_DISABLE (1<<0)
@@ -6002,7 +6088,8 @@ enum punit_power_well {
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \
+ _MIPIB_PORT_CTRL)
#define DPI_ENABLE (1 << 31) /* A + B */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
@@ -6044,18 +6131,20 @@ enum punit_power_well {
#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \
+ _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
/* XXX: all bits reserved */
-#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
+#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
/* MIPI DSI Controller and D-PHY registers */
-#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
-#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
-#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
+#define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
+#define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \
+ _MIPIB_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
@@ -6063,12 +6152,14 @@ enum punit_power_well {
#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
#define DEVICE_READY (1 << 0)
-#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
-#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
-#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
-#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
-#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
-#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
+#define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
+#define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \
+ _MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
+#define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
+#define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \
+ _MIPIB_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
@@ -6102,9 +6193,10 @@ enum punit_power_well {
#define RXSOT_SYNC_ERROR (1 << 1)
#define RXSOT_ERROR (1 << 0)
-#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
-#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
+ _MIPIB_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@@ -6125,78 +6217,94 @@ enum punit_power_well {
#define DATA_LANES_PRG_REG_SHIFT 0
#define DATA_LANES_PRG_REG_MASK (7 << 0)
-#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
-#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
+ _MIPIB_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
-#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
+ _MIPIB_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
-#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \
+ _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
-#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
-#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \
+ _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
-#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
-#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
-#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
+#define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
+#define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
+ _MIPIB_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
-#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
-#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \
+ _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
-#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
-
-#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
-#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
-#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
-
-#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
-#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
-#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
-
-#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
-#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
-
-#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
-#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
+#define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
+#define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \
+ _MIPIB_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
+#define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
+#define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \
+ _MIPIB_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
+#define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
+#define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \
+ _MIPIB_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
+#define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
+#define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \
+ _MIPIB_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
-#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
-#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
-#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
-
-#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
-#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
-#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
-
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
-#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
/* regs above are bits 15:0 */
-#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
-#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
-#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
+#define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
+#define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
+ _MIPIB_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
@@ -6205,27 +6313,31 @@ enum punit_power_well {
#define TURN_ON (1 << 1)
#define SHUTDOWN (1 << 0)
-#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
-#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
-#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
+#define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
+#define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \
+ _MIPIB_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
-#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
-#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
-#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
+#define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
+#define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \
+ _MIPIB_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
-#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
-#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \
+ _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
-#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
-#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \
+ _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
@@ -6233,9 +6345,10 @@ enum punit_power_well {
#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
#define VIDEO_MODE_BURST (3 << 0)
-#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
-#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
-#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
+#define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
+#define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
+ _MIPIB_EOT_DISABLE)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
@@ -6245,28 +6358,33 @@ enum punit_power_well {
#define CLOCKSTOP (1 << 1)
#define EOT_DISABLE (1 << 0)
-#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
-#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
-#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
+#define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
+#define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
+ _MIPIB_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
/* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
-#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
-#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
+#define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
+#define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
+ _MIPIB_LP_GEN_DATA)
/* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
-#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
-#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
-
-#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
-#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
-#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
-#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
-#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
+#define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
+#define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
+ _MIPIB_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
+#define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
+ _MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
+#define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
+#define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
+ _MIPIB_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
@@ -6277,9 +6395,10 @@ enum punit_power_well {
#define DATA_TYPE_MASK (0x3f << 0)
/* data type values, see include/video/mipi_display.h */
-#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
-#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
-#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
+#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
+ _MIPIB_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
@@ -6295,16 +6414,18 @@ enum punit_power_well {
#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
#define HS_DATA_FIFO_FULL (1 << 0)
-#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
-#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \
+ _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
-#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
-#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
-#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
+#define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
+#define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
+ _MIPIB_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
@@ -6315,34 +6436,41 @@ enum punit_power_well {
#define PREPARE_COUNT_MASK (0x3f << 0)
/* bits 31:0 */
-#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
-#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
-#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
-
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
-#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
+#define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
+#define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
+ _MIPIB_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ + 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \
+ _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
-#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
-#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
-#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
+#define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \
+ _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
-#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
-#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
-#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
-#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
-#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
+#define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \
+ _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
+#define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
+#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
+ _MIPIB_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100)
#define DBI_TYPEC_ENABLE (1 << 31)
#define DBI_TYPEC_WIP (1 << 30)
#define DBI_TYPEC_OPTION_SHIFT 28
@@ -6356,9 +6484,10 @@ enum punit_power_well {
/* MIPI adapter registers */
-#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
-#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
-#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
+#define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904)
+#define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \
+ _MIPIB_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@@ -6370,50 +6499,52 @@ enum punit_power_well {
#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
#define RGB_FLIP_TO_BGR (1 << 2)
-#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
-#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
-#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
+#define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
+#define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
+ _MIPIB_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
-#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
-#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
-#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
+#define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
+#define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
+ _MIPIB_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
-#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
-#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
-#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
+#define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \
+ _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
#define COMMAND_VALID (1 << 0)
-#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
-#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
-#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
+#define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
+#define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
+ _MIPIB_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
-#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
-#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
-#define MIPI_READ_DATA_RETURN(pipe, n) \
- (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
+#define MIPI_READ_DATA_RETURN(tc, n) \
+ (_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
+ + 4 * (n)) /* n: 0...7 */
-#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
-#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
-#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
+#define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
+#define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \
+ _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
/* For UMS only (deprecated): */
#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
-#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
-#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
-#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 86ce39aad0ff..ae7fd8fc27f0 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -47,22 +47,45 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
intel_runtime_pm_get(dev_priv);
- /* On VLV, residency time is in CZ units rather than 1.28us */
+ /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev)) {
- u32 clkctl2;
+ u32 reg, czcount_30ns;
- clkctl2 = I915_READ(VLV_CLK_CTL2) >>
- CLK_CTL2_CZCOUNT_30NS_SHIFT;
- if (!clkctl2) {
- WARN(!clkctl2, "bogus CZ count value");
+ if (IS_CHERRYVIEW(dev))
+ reg = CHV_CLK_CTL1;
+ else
+ reg = VLV_CLK_CTL2;
+
+ czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
+
+ if (!czcount_30ns) {
+ WARN(!czcount_30ns, "bogus CZ count value");
ret = 0;
goto out;
}
- units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
+
+ units = 0;
+ div = 1000000ULL;
+
+ if (IS_CHERRYVIEW(dev)) {
+ /* Special case for 320 MHz */
+ if (czcount_30ns == 1) {
+ div = 10000000ULL;
+ units = 3125ULL;
+ } else {
+ /* chv counts are one less */
+ czcount_30ns += 1;
+ }
+ }
+
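+ /* CLK_CTL2_CZCOUNT_30NS gives CZ clocks per 30 ns, so each
+ * counter tick corresponds to 30/czcount_30ns ns. */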
+ if (units == 0)
+ units = DIV_ROUND_UP_ULL(30ULL * bias,
+ (u64)czcount_30ns);
+
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
- div = 1000000ULL * bias;
+ div = div * bias;
}
raw_time = I915_READ(reg) * units;
@@ -461,11 +484,20 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
mutex_unlock(&dev->struct_mutex);
if (attr == &dev_attr_gt_RP0_freq_mhz) {
- val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+ if (IS_VALLEYVIEW(dev))
+ val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+ else
+ val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
- val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+ if (IS_VALLEYVIEW(dev))
+ val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+ else
+ val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
- val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+ if (IS_VALLEYVIEW(dev))
+ val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+ else
+ val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
} else {
BUG();
}
@@ -486,6 +518,9 @@ static const struct attribute *vlv_attrs[] = {
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_gt_RP0_freq_mhz.attr,
+ &dev_attr_gt_RP1_freq_mhz.attr,
+ &dev_attr_gt_RPn_freq_mhz.attr,
&dev_attr_vlv_rpe_freq_mhz.attr,
NULL,
};
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 827498e081df..608ed302f24d 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -336,11 +336,12 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+ dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u\n",
dev_priv->vbt.backlight.pwm_freq_hz,
dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
- entry->min_brightness,
+ dev_priv->vbt.backlight.min_brightness,
backlight_data->level[panel_type]);
}
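/*
 * The min_brightness value cached above is typically used to keep
 * requested backlight levels at or above the panel's floor. A hedged
 * sketch of such a clamp -- the function name and the 0..255 VBT
 * scale interpretation are assumptions, not the driver's interface.
 */
unsigned int clamp_backlight(unsigned int level,
			     unsigned int vbt_min_255,
			     unsigned int pwm_max)
{
	/* VBT stores the minimum on a 0..255 scale */
	unsigned int min = vbt_min_255 * pwm_max / 255;

	if (level < min)
		level = min;
	if (level > pwm_max)
		level = pwm_max;
	return level;
}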
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 8da5ef9f4828..2efaf8e8d9c4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -137,6 +137,18 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
+static void hsw_crt_pre_enable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
+ I915_WRITE(SPLL_CTL,
+ SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
+ POSTING_READ(SPLL_CTL);
+ udelay(20);
+}
+
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -194,6 +206,20 @@ static void intel_disable_crt(struct intel_encoder *encoder)
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
}
+
+static void hsw_crt_post_disable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val;
+
+ DRM_DEBUG_KMS("Disabling SPLL\n");
+ val = I915_READ(SPLL_CTL);
+ WARN_ON(!(val & SPLL_PLL_ENABLE));
+ I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ POSTING_READ(SPLL_CTL);
+}
+
static void intel_enable_crt(struct intel_encoder *encoder)
{
struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -289,8 +315,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = 24;
/* FDI must always be 2.7 GHz */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev)) {
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
pipe_config->port_clock = 135000 * 2;
+ }
return true;
}
@@ -632,8 +660,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
struct intel_load_detect_pipe tmp;
struct drm_modeset_acquire_ctx ctx;
- intel_runtime_pm_get(dev_priv);
-
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name,
force);
@@ -685,8 +711,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
out:
intel_display_power_put(dev_priv, power_domain);
- intel_runtime_pm_put(dev_priv);
-
return status;
}
@@ -860,6 +884,8 @@ void intel_crt_init(struct drm_device *dev)
if (HAS_DDI(dev)) {
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
+ crt->base.pre_enable = hsw_crt_pre_enable;
+ crt->base.post_disable = hsw_crt_post_disable;
} else {
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
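/*
 * The new pre_enable/post_disable pair moves the SPLL on/off writes
 * into the encoder hooks, bracketing the modeset. The same
 * warn-guarded, single-owner toggle in isolation -- a sketch, with a
 * shadow field standing in for the MMIO register:
 */
#include <stdint.h>
#include <stdio.h>

#define PLL_ENABLE	(1u << 31)	/* bit 31, as in SPLL_CTL/WRPLL_CTL */

struct toy_pll {
	uint32_t reg;	/* shadow of the control register */
};

void toy_pll_pre_enable(struct toy_pll *pll, uint32_t cfg)
{
	if (pll->reg & PLL_ENABLE)
		fprintf(stderr, "PLL already enabled\n");
	pll->reg = PLL_ENABLE | cfg;
	/* real hardware needs a posting read plus ~20us to settle */
}

void toy_pll_post_disable(struct toy_pll *pll)
{
	if (!(pll->reg & PLL_ENABLE))
		fprintf(stderr, "PLL was not enabled\n");
	pll->reg &= ~PLL_ENABLE;
}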
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b17b9c7c769f..5db0b5552e39 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -76,12 +76,12 @@ static const u32 bdw_ddi_translations_edp[] = {
0x00FFFFFF, 0x00000012, /* eDP parameters */
0x00EBAFFF, 0x00020011,
0x00C71FFF, 0x0006000F,
+ 0x00AAAFFF, 0x000E000A,
0x00FFFFFF, 0x00020011,
0x00DB6FFF, 0x0005000F,
0x00BEEFFF, 0x000A000C,
0x00FFFFFF, 0x0005000F,
0x00DB6FFF, 0x000A000C,
- 0x00FFFFFF, 0x000A000C,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB */
};
@@ -89,12 +89,12 @@ static const u32 bdw_ddi_translations_dp[] = {
0x00FFFFFF, 0x0007000E, /* DP parameters */
0x00D75FFF, 0x000E000A,
0x00BEFFFF, 0x00140006,
+ 0x80B2CFFF, 0x001B0002,
0x00FFFFFF, 0x000E000A,
0x00D75FFF, 0x00180004,
0x80CB2FFF, 0x001B0002,
0x00F7DFFF, 0x00180004,
0x80D75FFF, 0x001B0002,
- 0x80FFFFFF, 0x001B0002,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB */
};
@@ -116,7 +116,10 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+ if (type == INTEL_OUTPUT_DP_MST) {
+ struct intel_digital_port *intel_dig_port = enc_to_mst(encoder)->primary;
+ return intel_dig_port->port;
+ } else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
@@ -277,7 +280,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
/* Configure Port Clock Select */
- I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
+ WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
@@ -364,6 +368,18 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DRM_ERROR("FDI link training failed!\n");
}
+void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(&encoder->base);
+
+ intel_dp->DP = intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+}
+
static struct intel_encoder *
intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
{
@@ -385,53 +401,6 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
return ret;
}
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t val;
-
- switch (intel_crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_SPLL:
- plls->spll_refcount--;
- if (plls->spll_refcount == 0) {
- DRM_DEBUG_KMS("Disabling SPLL\n");
- val = I915_READ(SPLL_CTL);
- WARN_ON(!(val & SPLL_PLL_ENABLE));
- I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
- POSTING_READ(SPLL_CTL);
- }
- break;
- case PORT_CLK_SEL_WRPLL1:
- plls->wrpll1_refcount--;
- if (plls->wrpll1_refcount == 0) {
- DRM_DEBUG_KMS("Disabling WRPLL 1\n");
- val = I915_READ(WRPLL_CTL1);
- WARN_ON(!(val & WRPLL_PLL_ENABLE));
- I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL1);
- }
- break;
- case PORT_CLK_SEL_WRPLL2:
- plls->wrpll2_refcount--;
- if (plls->wrpll2_refcount == 0) {
- DRM_DEBUG_KMS("Disabling WRPLL 2\n");
- val = I915_READ(WRPLL_CTL2);
- WARN_ON(!(val & WRPLL_PLL_ENABLE));
- I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL2);
- }
- break;
- }
-
- WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
- WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
- WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
-
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
-}
-
#define LC_FREQ 2700
#define LC_FREQ_2K (LC_FREQ * 2000)
@@ -592,9 +561,9 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
u32 wrpll;
wrpll = I915_READ(reg);
- switch (wrpll & SPLL_PLL_REF_MASK) {
- case SPLL_PLL_SSC:
- case SPLL_PLL_NON_SSC:
+ switch (wrpll & WRPLL_PLL_REF_MASK) {
+ case WRPLL_PLL_SSC:
+ case WRPLL_PLL_NON_SSC:
/*
* We could calculate spread here, but our checking
* code only cares about 5% accuracy, and spread is a max of
@@ -602,7 +571,7 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
*/
refclk = 135;
break;
- case SPLL_PLL_LCPLL:
+ case WRPLL_PLL_LCPLL:
refclk = LC_FREQ;
break;
default:
@@ -618,15 +587,14 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
return (refclk * n * 100) / (p * r);
}
-static void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- enum port port = intel_ddi_get_encoder_port(encoder);
int link_clock = 0;
u32 val, pll;
- val = I915_READ(PORT_CLK_SEL(port));
+ val = pipe_config->ddi_pll_sel;
switch (val & PORT_CLK_SEL_MASK) {
case PORT_CLK_SEL_LCPLL_810:
link_clock = 81000;
@@ -750,173 +718,37 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
int type = intel_encoder->type;
- enum pipe pipe = intel_crtc->pipe;
int clock = intel_crtc->config.port_clock;
- intel_ddi_put_crtc_pll(crtc);
-
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_put_shared_dpll(intel_crtc);
- switch (intel_dp->link_bw) {
- case DP_LINK_BW_1_62:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
- break;
- case DP_LINK_BW_2_7:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
- break;
- case DP_LINK_BW_5_4:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
- break;
- default:
- DRM_ERROR("Link bandwidth %d unsupported\n",
- intel_dp->link_bw);
- return false;
- }
-
- } else if (type == INTEL_OUTPUT_HDMI) {
- uint32_t reg, val;
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_shared_dpll *pll;
+ uint32_t val;
unsigned p, n2, r2;
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
- val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
- if (val == I915_READ(WRPLL_CTL1)) {
- DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL1;
- } else if (val == I915_READ(WRPLL_CTL2)) {
- DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL2;
- } else if (plls->wrpll1_refcount == 0) {
- DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL1;
- } else if (plls->wrpll2_refcount == 0) {
- DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL2;
- } else {
- DRM_ERROR("No WRPLLs available!\n");
- return false;
- }
-
- DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
- clock, p, n2, r2);
-
- if (reg == WRPLL_CTL1) {
- plls->wrpll1_refcount++;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
- } else {
- plls->wrpll2_refcount++;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
- }
+ intel_crtc->config.dpll_hw_state.wrpll = val;
- } else if (type == INTEL_OUTPUT_ANALOG) {
- if (plls->spll_refcount == 0) {
- DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
- pipe_name(pipe));
- plls->spll_refcount++;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
- } else {
- DRM_ERROR("SPLL already in use\n");
+ pll = intel_get_shared_dpll(intel_crtc);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(intel_crtc->pipe));
return false;
}
- } else {
- WARN(1, "Invalid DDI encoder type %d\n", type);
- return false;
+ intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
}
return true;
}
-/*
- * To be called after intel_ddi_pll_select(). That one selects the PLL to be
- * used, this one actually enables the PLL.
- */
-void intel_ddi_pll_enable(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
- int clock = crtc->config.port_clock;
- uint32_t reg, cur_val, new_val;
- int refcount;
- const char *pll_name;
- uint32_t enable_bit = (1 << 31);
- unsigned int p, n2, r2;
-
- BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
- BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
-
- switch (crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_LCPLL_2700:
- case PORT_CLK_SEL_LCPLL_1350:
- case PORT_CLK_SEL_LCPLL_810:
- /*
- * LCPLL should always be enabled at this point of the mode set
- * sequence, so nothing to do.
- */
- return;
-
- case PORT_CLK_SEL_SPLL:
- pll_name = "SPLL";
- reg = SPLL_CTL;
- refcount = plls->spll_refcount;
- new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
- SPLL_PLL_SSC;
- break;
-
- case PORT_CLK_SEL_WRPLL1:
- case PORT_CLK_SEL_WRPLL2:
- if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
- pll_name = "WRPLL1";
- reg = WRPLL_CTL1;
- refcount = plls->wrpll1_refcount;
- } else {
- pll_name = "WRPLL2";
- reg = WRPLL_CTL2;
- refcount = plls->wrpll2_refcount;
- }
-
- intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
- new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
- WRPLL_DIVIDER_REFERENCE(r2) |
- WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
-
- break;
-
- case PORT_CLK_SEL_NONE:
- WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
- return;
- default:
- WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
- return;
- }
-
- cur_val = I915_READ(reg);
-
- WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
- if (refcount == 1) {
- WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
- I915_WRITE(reg, new_val);
- POSTING_READ(reg);
- udelay(20);
- } else {
- WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
- }
-}
-
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -926,8 +758,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
int type = intel_encoder->type;
uint32_t temp;
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
-
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+ type == INTEL_OUTPUT_DP_MST) {
temp = TRANS_MSA_SYNC_CLK;
switch (intel_crtc->config.pipe_bpp) {
case 18:
@@ -949,6 +780,21 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
}
}
+void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ uint32_t temp;
+
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (state)
+ temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ else
+ temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -995,7 +841,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
- if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
+ if (IS_HASWELL(dev) &&
+ (intel_crtc->config.pch_pfit.enabled ||
+ intel_crtc->config.pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1026,7 +874,19 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+ if (intel_dp->is_mst)
+ temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+ temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
+ } else if (type == INTEL_OUTPUT_DP_MST) {
+ struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
+
+ if (intel_dp->is_mst)
+ temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
} else {
@@ -1043,7 +903,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);
- val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK |
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
I915_WRITE(reg, val);
}
@@ -1082,8 +942,11 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
case TRANS_DDI_MODE_SELECT_DP_SST:
if (type == DRM_MODE_CONNECTOR_eDP)
return true;
- case TRANS_DDI_MODE_SELECT_DP_MST:
return (type == DRM_MODE_CONNECTOR_DisplayPort);
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ /* if the transcoder is in MST state then the
+ * connector isn't connected */
+ return false;
case TRANS_DDI_MODE_SELECT_FDI:
return (type == DRM_MODE_CONNECTOR_VGA);
@@ -1135,6 +998,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
if ((tmp & TRANS_DDI_PORT_MASK)
== TRANS_DDI_SELECT_PORT(port)) {
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
+ return false;
+
*pipe = i;
return true;
}
@@ -1146,76 +1012,6 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
return false;
}
-static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- uint32_t temp, ret;
- enum port port = I915_MAX_PORTS;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
- int i;
-
- if (cpu_transcoder == TRANSCODER_EDP) {
- port = PORT_A;
- } else {
- temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- temp &= TRANS_DDI_PORT_MASK;
-
- for (i = PORT_B; i <= PORT_E; i++)
- if (temp == TRANS_DDI_SELECT_PORT(i))
- port = i;
- }
-
- if (port == I915_MAX_PORTS) {
- WARN(1, "Pipe %c enabled on an unknown port\n",
- pipe_name(pipe));
- ret = PORT_CLK_SEL_NONE;
- } else {
- ret = I915_READ(PORT_CLK_SEL(port));
- DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
- "0x%08x\n", pipe_name(pipe), port_name(port),
- ret);
- }
-
- return ret;
-}
-
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe;
- struct intel_crtc *intel_crtc;
-
- dev_priv->ddi_plls.spll_refcount = 0;
- dev_priv->ddi_plls.wrpll1_refcount = 0;
- dev_priv->ddi_plls.wrpll2_refcount = 0;
-
- for_each_pipe(pipe) {
- intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
- if (!intel_crtc->active) {
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
- continue;
- }
-
- intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
- pipe);
-
- switch (intel_crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_SPLL:
- dev_priv->ddi_plls.spll_refcount++;
- break;
- case PORT_CLK_SEL_WRPLL1:
- dev_priv->ddi_plls.wrpll1_refcount++;
- break;
- case PORT_CLK_SEL_WRPLL2:
- dev_priv->ddi_plls.wrpll2_refcount++;
- break;
- }
- }
-}
-
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
@@ -1261,17 +1057,13 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_edp_panel_on(intel_dp);
}
- WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
+ WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(encoder);
- intel_dp->DP = intel_dig_port->saved_port_bits |
- DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
- intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+ intel_ddi_init_dp_buf_reg(intel_encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
@@ -1418,10 +1210,60 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
}
}
+static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
+ POSTING_READ(WRPLL_CTL(pll->id));
+ udelay(20);
+}
+
+static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t val;
+
+ val = I915_READ(WRPLL_CTL(pll->id));
+ I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL(pll->id));
+}
+
+static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+
+ if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ val = I915_READ(WRPLL_CTL(pll->id));
+ hw_state->wrpll = val;
+
+ return val & WRPLL_PLL_ENABLE;
+}
+
+static const char * const hsw_ddi_pll_names[] = {
+ "WRPLL 1",
+ "WRPLL 2",
+};
+
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
+ int i;
+
+ dev_priv->num_shared_dpll = 2;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ dev_priv->shared_dplls[i].id = i;
+ dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+ dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
+ dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
+ dev_priv->shared_dplls[i].get_hw_state =
+ hsw_ddi_pll_get_hw_state;
+ }
/* The LCPLL register should be turned on by the BIOS. For now let's
* just check its state and print errors in case something is wrong.
@@ -1465,10 +1307,15 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
intel_wait_ddi_buf_idle(dev_priv, port);
}
- val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+ val = DP_TP_CTL_ENABLE |
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ if (intel_dp->is_mst) {
+ val |= DP_TP_CTL_MODE_MST;
+ } else {
+ val |= DP_TP_CTL_MODE_SST;
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ }
I915_WRITE(DP_TP_CTL(port), val);
POSTING_READ(DP_TP_CTL(port));
@@ -1507,11 +1354,16 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
- int type = intel_encoder->type;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+ int type = intel_dig_port->base.type;
+
+ if (type != INTEL_OUTPUT_DISPLAYPORT &&
+ type != INTEL_OUTPUT_EDP &&
+ type != INTEL_OUTPUT_UNKNOWN) {
+ return;
+ }
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
- intel_dp_check_link_status(intel_dp);
+ intel_dp_hot_plug(intel_encoder);
}
void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -1663,15 +1515,13 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
- struct intel_connector *hdmi_connector = NULL;
- struct intel_connector *dp_connector = NULL;
bool init_hdmi, init_dp;
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
if (!init_dp && !init_hdmi) {
- DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
+ DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
port_name(port));
init_hdmi = true;
init_dp = true;
@@ -1701,20 +1551,28 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
DDI_A_4_LANES);
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_ddi_hot_plug;
- if (init_dp)
- dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
+ if (init_dp) {
+ if (!intel_ddi_init_dp_connector(intel_dig_port))
+ goto err;
+
+ intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ dev_priv->hpd_irq_port[port] = intel_dig_port;
+ }
/* In theory we don't need the encoder->type check, but leave it just in
* case we have some really bad VBTs... */
- if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
- hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
-
- if (!dp_connector && !hdmi_connector) {
- drm_encoder_cleanup(encoder);
- kfree(intel_dig_port);
+ if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+ if (!intel_ddi_init_hdmi_connector(intel_dig_port))
+ goto err;
}
+
+ return;
+
+err:
+ drm_encoder_cleanup(encoder);
+ kfree(intel_dig_port);
}
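/*
 * With this rewrite the two WRPLLs become ordinary shared-DPLL table
 * entries, found and refcounted generically instead of via per-PLL
 * counters. A compact model of that sharing scheme; matching purely on
 * the desired control value is a simplification of the real
 * intel_get_shared_dpll() logic.
 */
#include <stdint.h>
#include <stddef.h>

struct shared_dpll {
	const char *name;
	uint32_t hw_state;	/* desired WRPLL_CTL value */
	int refcount;
};

static struct shared_dpll plls[2] = { { "WRPLL 1" }, { "WRPLL 2" } };

/* Reuse a PLL already programmed to the wanted state, else claim a
 * free one; NULL when both are busy with other configurations. */
struct shared_dpll *get_shared_dpll(uint32_t wanted)
{
	size_t i;

	for (i = 0; i < 2; i++)
		if (plls[i].refcount && plls[i].hw_state == wanted) {
			plls[i].refcount++;
			return &plls[i];
		}
	for (i = 0; i < 2; i++)
		if (!plls[i].refcount) {
			plls[i].hw_state = wanted;
			plls[i].refcount = 1;
			return &plls[i];
		}
	return NULL;
}

void put_shared_dpll(struct shared_dpll *pll)
{
	if (pll && pll->refcount > 0)
		pll->refcount--;
}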
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1112d9ecc226..99eb7cad62a8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -39,12 +39,45 @@
#include "i915_trace.h"
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
+/* Primary plane formats supported by all gen */
+#define COMMON_PRIMARY_FORMATS \
+ DRM_FORMAT_C8, \
+ DRM_FORMAT_RGB565, \
+ DRM_FORMAT_XRGB8888, \
+ DRM_FORMAT_ARGB8888
+
+/* Primary plane formats for gen <= 3 */
+static const uint32_t intel_primary_formats_gen2[] = {
+ COMMON_PRIMARY_FORMATS,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+};
+
+/* Primary plane formats for gen >= 4 */
+static const uint32_t intel_primary_formats_gen4[] = {
+ COMMON_PRIMARY_FORMATS, \
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+};
+
+/* Cursor formats */
+static const uint32_t intel_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
- ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
-static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_increase_pllclock(struct drm_device *dev,
+ enum pipe pipe);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -68,6 +101,14 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc);
+static struct intel_encoder *
+intel_find_encoder(struct intel_connector *connector, int pipe)
+{
+ if (connector->mst_port)
+ return &connector->mst_port->mst_encoders[pipe]->base;
+
+ return connector->encoder;
+}
+
typedef struct {
int min, max;
} intel_range_t;
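/*
 * The format arrays above are handed to plane registration based on
 * hardware generation; the dispatch reduces to an array/length pair.
 * A trimmed sketch with abbreviated stand-in fourcc values (the real
 * tables are the gen2/gen4 arrays above):
 */
#include <stddef.h>
#include <stdint.h>

static const uint32_t primary_gen2[] = { 0x20203843 /* C8 */,
					 0x36314752 /* RGB565 */ };
static const uint32_t primary_gen4[] = { 0x20203843, 0x36314752,
					 0x34324258 /* XBGR8888 */ };

/* gen <= 3 gets the small table, gen >= 4 the extended one */
size_t primary_formats(int gen, const uint32_t **fmts)
{
	if (gen <= 3) {
		*fmts = primary_gen2;
		return sizeof(primary_gen2) / sizeof(primary_gen2[0]);
	}
	*fmts = primary_gen4;
	return sizeof(primary_gen4) / sizeof(primary_gen4[0]);
}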
@@ -1061,11 +1102,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool cur_state;
struct intel_dpll_hw_state hw_state;
- if (HAS_PCH_LPT(dev_priv->dev)) {
- DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
- return;
- }
-
if (WARN (!pll,
"asserting DPLL %s with no DPLL\n", state_string(state)))
return;
@@ -1481,9 +1517,6 @@ static void intel_reset_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_VALLEYVIEW(dev))
- return;
-
if (IS_CHERRYVIEW(dev)) {
enum dpio_phy phy;
u32 val;
@@ -1505,26 +1538,6 @@ static void intel_reset_dpio(struct drm_device *dev)
I915_WRITE(DISPLAY_PHY_CONTROL,
PHY_COM_LANE_RESET_DEASSERT(phy, val));
}
-
- } else {
- /*
- * If DPIO has already been reset, e.g. by BIOS, just skip all
- * this.
- */
- if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
- return;
-
- /*
- * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
- * Need to assert and de-assert PHY SB reset by gating the
- * common lane power, then un-gating it.
- * Simply ungating isn't enough to reset the PHY enough to get
- * ports and lanes running.
- */
- __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
- false);
- __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
- true);
}
}
@@ -1712,6 +1725,17 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
val &= ~DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+ /* disable left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -1749,6 +1773,9 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+ if (WARN_ON(pll == NULL))
+ return;
+
WARN_ON(!pll->refcount);
if (pll->active == 0) {
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
@@ -1790,12 +1817,14 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
}
WARN_ON(pll->on);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+
DRM_DEBUG_KMS("enabling %s\n", pll->name);
pll->enable(dev_priv, pll);
pll->on = true;
}
-static void intel_disable_shared_dpll(struct intel_crtc *crtc)
+void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1826,6 +1855,8 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
DRM_DEBUG_KMS("disabling %s\n", pll->name);
pll->disable(dev_priv, pll);
pll->on = false;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -2172,6 +2203,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
u32 alignment;
int ret;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
@@ -2228,6 +2261,8 @@ err_interruptible:
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
+ WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
+
i915_gem_object_unpin_fence(obj);
i915_gem_object_unpin_from_display_plane(obj);
}
@@ -2314,6 +2349,7 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
goto out_unref_obj;
}
+ obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG_KMS("plane fb obj %p\n", obj);
@@ -2331,7 +2367,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_crtc *c;
struct intel_crtc *i;
- struct intel_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
if (!intel_crtc->base.primary->fb)
return;
@@ -2352,13 +2388,17 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
if (c == &intel_crtc->base)
continue;
- if (!i->active || !c->primary->fb)
+ if (!i->active)
+ continue;
+
+ obj = intel_fb_obj(c->primary->fb);
+ if (obj == NULL)
continue;
- fb = to_intel_framebuffer(c->primary->fb);
- if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
+ if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
drm_framebuffer_reference(c->primary->fb);
intel_crtc->base.primary->fb = c->primary->fb;
+ obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
break;
}
}
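/*
 * The frontbuffer_bits bookkeeping introduced here gives each pipe's
 * primary plane one ownership bit; a flip moves the bit from the
 * outgoing buffer object to the incoming one. A minimal model of that
 * transfer (mirroring i915_gem_track_fb() as used below):
 */
#include <stdint.h>

#define FRONTBUFFER_PRIMARY(pipe)	(1u << (pipe))

struct buf_obj {
	uint32_t frontbuffer_bits;
};

void track_fb(struct buf_obj *old, struct buf_obj *new_obj, uint32_t bits)
{
	if (old)
		old->frontbuffer_bits &= ~bits;
	if (new_obj)
		new_obj->frontbuffer_bits |= bits;
}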
@@ -2371,16 +2411,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
@@ -2461,16 +2497,12 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
@@ -2546,7 +2578,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- intel_increase_pllclock(crtc);
+ intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
dev_priv->display.update_primary_plane(crtc, fb, x, y);
@@ -2601,7 +2633,7 @@ void intel_display_handle_reset(struct drm_device *dev)
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
- struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
@@ -2647,7 +2679,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_framebuffer *old_fb;
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
int ret;
if (intel_crtc_has_pending_flip(crtc)) {
@@ -2669,9 +2704,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
}
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(fb)->obj,
- NULL);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
@@ -2711,7 +2747,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dev_priv->display.update_primary_plane(crtc, fb, x, y);
- old_fb = crtc->primary->fb;
+ if (intel_crtc->active)
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
crtc->primary->fb = fb;
crtc->x = x;
crtc->y = y;
@@ -2720,13 +2758,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (intel_crtc->active && old_fb != fb)
intel_wait_for_vblank(dev, intel_crtc->pipe);
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -3587,7 +3624,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-static void intel_put_shared_dpll(struct intel_crtc *crtc)
+void intel_put_shared_dpll(struct intel_crtc *crtc)
{
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3607,7 +3644,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}
-static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3818,7 +3855,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
}
/* use legacy palette for Ironlake */
- if (HAS_PCH_SPLIT(dev))
+ if (!HAS_GMCH_DISPLAY(dev))
palreg = LGC_PALETTE(pipe);
/* Workaround : Do not read or write the pipe palette/gamma data while
@@ -3860,30 +3897,6 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
*/
}
-/**
- * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
- * cursor plane briefly if not already running after enabling the display
- * plane.
- * This workaround avoids occasional blank screens when self refresh is
- * enabled.
- */
-static void
-g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- u32 cntl = I915_READ(CURCNTR(pipe));
-
- if ((cntl & CURSOR_MODE) == 0) {
- u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
-
- I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
- I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
- intel_wait_for_vblank(dev_priv->dev, pipe);
- I915_WRITE(CURCNTR(pipe), cntl);
- I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
- I915_WRITE(FW_BLC_SELF, fw_bcl_self);
- }
-}
-
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3892,11 +3905,10 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ drm_vblank_on(dev, pipe);
+
intel_enable_primary_hw_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
- /* The fixup needs to happen before cursor is enabled */
- if (IS_G4X(dev))
- g4x_fixup_plane(dev_priv, pipe);
intel_crtc_update_cursor(crtc, true);
intel_crtc_dpms_overlay(intel_crtc, true);
@@ -3904,8 +3916,14 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * FIXME: Once we grow proper nuclear flip support out of this we need
+ * to compute the mask of flip planes precisely. For the time being
+ * consider this a flip from a NULL plane.
+ */
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
@@ -3917,7 +3935,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
int plane = intel_crtc->plane;
intel_crtc_wait_for_pending_flips(crtc);
- drm_crtc_vblank_off(crtc);
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
@@ -3928,6 +3945,15 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+
+ /*
+ * FIXME: Once we grow proper nuclear flip support out of this we need
+ * to compute the mask of flip planes precisely. For the time being
+ * consider this a flip to a NULL plane.
+ */
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+
+ drm_vblank_off(dev, pipe);
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4006,8 +4032,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
cpt_verify_modeset(dev, intel_crtc->pipe);
intel_crtc_enable_planes(crtc);
-
- drm_crtc_vblank_on(crtc);
}
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -4059,6 +4083,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->active)
return;
+ if (intel_crtc_to_shared_dpll(intel_crtc))
+ intel_enable_shared_dpll(intel_crtc);
+
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
@@ -4083,16 +4110,15 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
- if (intel_crtc->config.has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
-
- if (intel_crtc->config.has_pch_encoder)
- dev_priv->display.fdi_link_train(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
+ if (intel_crtc->config.has_pch_encoder) {
+ intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+ dev_priv->display.fdi_link_train(crtc);
+ }
+
intel_ddi_enable_pipe_clock(intel_crtc);
ironlake_pfit_enable(intel_crtc);
@@ -4112,6 +4138,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config.has_pch_encoder)
lpt_pch_enable(crtc);
+ if (intel_crtc->config.dp_encoder_is_mst)
+ intel_ddi_set_vc_payload_alloc(crtc, true);
+
for_each_encoder_on_crtc(dev, crtc, encoder) {
encoder->enable(encoder);
intel_opregion_notify_encoder(encoder, true);
@@ -4121,8 +4150,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
intel_crtc_enable_planes(crtc);
-
- drm_crtc_vblank_on(crtc);
}
static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -4162,6 +4189,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_disable_pipe(dev_priv, pipe);
+ if (intel_crtc->config.dp_encoder_is_mst)
+ intel_ddi_set_vc_payload_alloc(crtc, false);
+
ironlake_pfit_disable(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4200,7 +4230,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -4233,23 +4262,25 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_pipe_clock(intel_crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->post_disable)
- encoder->post_disable(encoder);
-
if (intel_crtc->config.has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
intel_ddi_fdi_disable(crtc);
}
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
intel_crtc->active = false;
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
+
+ if (intel_crtc_to_shared_dpll(intel_crtc))
+ intel_disable_shared_dpll(intel_crtc);
}
static void ironlake_crtc_off(struct drm_crtc *crtc)
@@ -4258,10 +4289,6 @@ static void ironlake_crtc_off(struct drm_crtc *crtc)
intel_put_shared_dpll(intel_crtc);
}
-static void haswell_crtc_off(struct drm_crtc *crtc)
-{
- intel_ddi_put_crtc_pll(crtc);
-}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
@@ -4287,6 +4314,23 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
+static enum intel_display_power_domain port_to_power_domain(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return POWER_DOMAIN_PORT_DDI_A_4_LANES;
+ case PORT_B:
+ return POWER_DOMAIN_PORT_DDI_B_4_LANES;
+ case PORT_C:
+ return POWER_DOMAIN_PORT_DDI_C_4_LANES;
+ case PORT_D:
+ return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+ default:
+ WARN_ON_ONCE(1);
+ return POWER_DOMAIN_PORT_OTHER;
+ }
+}
+
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
@@ -4305,19 +4349,10 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- switch (intel_dig_port->port) {
- case PORT_A:
- return POWER_DOMAIN_PORT_DDI_A_4_LANES;
- case PORT_B:
- return POWER_DOMAIN_PORT_DDI_B_4_LANES;
- case PORT_C:
- return POWER_DOMAIN_PORT_DDI_C_4_LANES;
- case PORT_D:
- return POWER_DOMAIN_PORT_DDI_D_4_LANES;
- default:
- WARN_ON_ONCE(1);
- return POWER_DOMAIN_PORT_OTHER;
- }
+ return port_to_power_domain(intel_dig_port->port);
+ case INTEL_OUTPUT_DP_MST:
+ intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
+ return port_to_power_domain(intel_dig_port->port);
case INTEL_OUTPUT_ANALOG:
return POWER_DOMAIN_PORT_CRT;
case INTEL_OUTPUT_DSI:
@@ -4333,7 +4368,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
unsigned long mask;
enum transcoder transcoder;
@@ -4341,7 +4375,8 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
- if (pfit_enabled)
+ if (intel_crtc->config.pch_pfit.enabled ||
+ intel_crtc->config.pch_pfit.force_thru)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -4398,7 +4433,8 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
intel_display_set_init_power(dev_priv, false);
}
-int valleyview_get_vco(struct drm_i915_private *dev_priv)
+/* returns HPLL frequency in kHz */
+static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
@@ -4408,7 +4444,23 @@ int valleyview_get_vco(struct drm_i915_private *dev_priv)
CCK_FUSE_HPLL_FREQ_MASK;
mutex_unlock(&dev_priv->dpio_lock);
- return vco_freq[hpll_freq];
+ return vco_freq[hpll_freq] * 1000;
+}
+
+static void vlv_update_cdclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
+ dev_priv->vlv_cdclk_freq);
+
+ /*
+ * Program the gmbus_freq based on the cdclk frequency.
+ * BSpec erroneously claims we should aim for 4MHz, but
+ * in fact 1MHz is the correct frequency.
+ */
+ I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
}
/* Adjust CDclk dividers to allow high res or save power if possible */
@@ -4417,12 +4469,11 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, cmd;
- WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
- dev_priv->vlv_cdclk_freq = cdclk;
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
- if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
+ if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
cmd = 2;
- else if (cdclk == 266)
+ else if (cdclk == 266667)
cmd = 1;
else
cmd = 0;
@@ -4439,18 +4490,23 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
}
mutex_unlock(&dev_priv->rps.hw_lock);
- if (cdclk == 400) {
+ if (cdclk == 400000) {
u32 divider, vco;
vco = valleyview_get_vco(dev_priv);
- divider = ((vco << 1) / cdclk) - 1;
+ divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
mutex_lock(&dev_priv->dpio_lock);
/* adjust cdclk divider */
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
- val &= ~0xf;
+ val &= ~DISPLAY_FREQUENCY_VALUES;
val |= divider;
vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+
+ if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+ DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
+ 50))
+ DRM_ERROR("timed out waiting for CDclk change\n");
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -4463,54 +4519,43 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
* For high bandwidth configs, we set a higher latency in the bunit
* so that the core display fetch happens in time to avoid underruns.
*/
- if (cdclk == 400)
+ if (cdclk == 400000)
val |= 4500 / 250; /* 4.5 usec */
else
val |= 3000 / 250; /* 3.0 usec */
vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
mutex_unlock(&dev_priv->dpio_lock);
- /* Since we changed the CDclk, we need to update the GMBUSFREQ too */
- intel_i2c_reset(dev);
-}
-
-int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
-{
- int cur_cdclk, vco;
- int divider;
-
- vco = valleyview_get_vco(dev_priv);
-
- mutex_lock(&dev_priv->dpio_lock);
- divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
- mutex_unlock(&dev_priv->dpio_lock);
-
- divider &= 0xf;
-
- cur_cdclk = (vco << 1) / (divider + 1);
-
- return cur_cdclk;
+ vlv_update_cdclk(dev);
}
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
+ int vco = valleyview_get_vco(dev_priv);
+ int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
+
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
* 267MHz
- * 320MHz
+ * 320/333MHz (depends on HPLL freq)
* 400MHz
* So we check to see whether we're above 90% of the lower bin and
* adjust if needed.
+ *
+ * We seem to get an unstable or solid color picture at 200MHz.
+ * Not sure what's wrong. For now use 200MHz only when all pipes
+ * are off.
*/
- if (max_pixclk > 288000) {
- return 400;
- } else if (max_pixclk > 240000) {
- return 320;
- } else
- return 266;
- /* Looks like the 200MHz CDclk freq doesn't work on some configs */
+ if (max_pixclk > freq_320 * 9 / 10)
+ return 400000;
+ else if (max_pixclk > 266667 * 9 / 10)
+ return freq_320;
+ else if (max_pixclk > 0)
+ return 266667;
+ else
+ return 200000;
}
/* compute the max pixel clock for new configuration */
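/*
 * Worked example of the 90%-of-lower-bin rule in the comment above: a
 * standalone copy of the chooser, exercised with a 1080p-class pixel
 * clock. The 320000 value passed for freq_320 is illustrative; on
 * hardware it depends on the HPLL frequency as shown.
 */
#include <stdio.h>

int calc_cdclk(int max_pixclk, int freq_320)
{
	if (max_pixclk > freq_320 * 9 / 10)
		return 400000;
	else if (max_pixclk > 266667 * 9 / 10)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}

int main(void)
{
	/* 148500 kHz (1080p) is below 240000, so the 266 MHz bin wins */
	printf("%d kHz\n", calc_cdclk(148500, 320000));
	return 0;
}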
@@ -4633,8 +4678,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_crtc_enable_planes(crtc);
- drm_crtc_vblank_on(crtc);
-
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev);
}
@@ -4727,8 +4770,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
- drm_crtc_vblank_on(crtc);
-
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev);
}
@@ -4768,6 +4809,16 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+ * moment. So to make sure the plane gets truly disabled, disable
+ * first the self-refresh mode. The self-refresh enable bit in turn
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ intel_set_memory_cxsr(dev_priv, false);
intel_crtc_disable_planes(crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4776,9 +4827,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
+ * We also need to wait on all gmch platforms because of the
+ * self-refresh mode constraint explained above.
*/
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev, pipe);
intel_disable_pipe(dev_priv, pipe);
@@ -4805,7 +4857,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -4843,23 +4894,49 @@ static void intel_crtc_update_sarea(struct drm_crtc *crtc,
}
}
+/* Master function to enable/disable CRTC and corresponding power wells */
+void intel_crtc_control(struct drm_crtc *crtc, bool enable)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum intel_display_power_domain domain;
+ unsigned long domains;
+
+ if (enable) {
+ if (!intel_crtc->active) {
+ domains = get_crtc_power_domains(crtc);
+ for_each_power_domain(domain, domains)
+ intel_display_power_get(dev_priv, domain);
+ intel_crtc->enabled_power_domains = domains;
+
+ dev_priv->display.crtc_enable(crtc);
+ }
+ } else {
+ if (intel_crtc->active) {
+ dev_priv->display.crtc_disable(crtc);
+
+ domains = intel_crtc->enabled_power_domains;
+ for_each_power_domain(domain, domains)
+ intel_display_power_put(dev_priv, domain);
+ intel_crtc->enabled_power_domains = 0;
+ }
+ }
+}
+
/**
* Sets the power management mode of the pipe and plane.
*/
void intel_crtc_update_dpms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
bool enable = false;
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
enable |= intel_encoder->connectors_active;
- if (enable)
- dev_priv->display.crtc_enable(crtc);
- else
- dev_priv->display.crtc_disable(crtc);
+ intel_crtc_control(crtc, enable);
intel_crtc_update_sarea(crtc, enable);
}
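/*
 * intel_crtc_control() brackets the hardware enable/disable with the
 * power-domain references computed from the CRTC's encoders: get every
 * domain before enabling, put the same set after disabling. The shape
 * in isolation -- the helpers and the bit-per-domain encoding are
 * hypothetical stand-ins, not the driver's API:
 */
extern void power_get(int domain);
extern void power_put(int domain);
extern unsigned long crtc_domains(void *crtc);
extern void hw_enable(void *crtc);
extern void hw_disable(void *crtc);

struct crtc_state {
	int active;
	unsigned long enabled_power_domains;
};

void crtc_control(struct crtc_state *c, void *crtc, int enable)
{
	unsigned long domains;
	unsigned int d;

	if (enable && !c->active) {
		domains = crtc_domains(crtc);
		for (d = 0; d < 8 * sizeof(domains); d++)
			if (domains & (1ul << d))
				power_get(d);
		c->enabled_power_domains = domains;
		hw_enable(crtc);
		c->active = 1;
	} else if (!enable && c->active) {
		hw_disable(crtc);
		c->active = 0;
		domains = c->enabled_power_domains;
		for (d = 0; d < 8 * sizeof(domains); d++)
			if (domains & (1ul << d))
				power_put(d);
		c->enabled_power_domains = 0;
	}
}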
@@ -4869,6 +4946,8 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
@@ -4877,13 +4956,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
- assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
- assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
- assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
-
if (crtc->primary->fb) {
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj);
+ intel_unpin_fb_obj(old_obj);
+ i915_gem_track_fb(old_obj, NULL,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
crtc->primary->fb = NULL;
}
@@ -4939,24 +5016,31 @@ static void intel_connector_check_state(struct intel_connector *connector)
connector->base.base.id,
connector->base.name);
+ /* there is no real hw state for MST connectors */
+ if (connector->mst_port)
+ return;
+
WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
"wrong connector dpms state\n");
WARN(connector->base.encoder != &encoder->base,
"active connector not linked to encoder\n");
- WARN(!encoder->connectors_active,
- "encoder->connectors_active not set\n");
- encoder_enabled = encoder->get_hw_state(encoder, &pipe);
- WARN(!encoder_enabled, "encoder not enabled\n");
- if (WARN_ON(!encoder->base.crtc))
- return;
+ if (encoder) {
+ WARN(!encoder->connectors_active,
+ "encoder->connectors_active not set\n");
- crtc = encoder->base.crtc;
+ encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+ WARN(!encoder_enabled, "encoder not enabled\n");
+ if (WARN_ON(!encoder->base.crtc))
+ return;
- WARN(!crtc->enabled, "crtc not enabled\n");
- WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
- WARN(pipe != to_intel_crtc(crtc)->pipe,
- "encoder active on the wrong pipe\n");
+ crtc = encoder->base.crtc;
+
+ WARN(!crtc->enabled, "crtc not enabled\n");
+ WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+ WARN(pipe != to_intel_crtc(crtc)->pipe,
+ "encoder active on the wrong pipe\n");
+ }
}
}
@@ -5161,9 +5245,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
- /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
- * clock survives for now. */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ /*
+ * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
+ * old clock survives for now.
+ */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
pipe_config->shared_dpll = crtc->config.shared_dpll;
if (pipe_config->has_pch_encoder)
@@ -5174,7 +5260,22 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
- return 400000; /* FIXME */
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int vco = valleyview_get_vco(dev_priv);
+ u32 val;
+ int divider;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ divider = val & DISPLAY_FREQUENCY_VALUES;
+
+ WARN((val & DISPLAY_FREQUENCY_STATUS) !=
+ (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
+ "cdclk change in progress\n");
+
+ return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
}
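/*
 * The readback above inverts the divider programming:
 * cdclk = 2 * vco / (divider + 1), all in kHz. A quick numeric check
 * with illustrative values (not read from hardware):
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	int vco = 1600000;	/* hypothetical 1.6 GHz HPLL, in kHz */
	int divider = 7;	/* as decoded from CCK_DISPLAY_CLOCK_CONTROL */

	printf("%d kHz\n", DIV_ROUND_CLOSEST(vco << 1, divider + 1));
	return 0;
}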
static int i945_get_display_clock_speed(struct drm_device *dev)
@@ -6125,8 +6226,8 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
- plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height, PAGE_SIZE);
+ plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+ aligned_height);
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
@@ -7145,8 +7246,8 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc,
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
- plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height, PAGE_SIZE);
+ plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+ aligned_height);
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
@@ -7163,6 +7264,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
+ if (!intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
+ return false;
+
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -7237,7 +7342,6 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
struct intel_crtc *crtc;
for_each_intel_crtc(dev, crtc)
@@ -7245,14 +7349,15 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
pipe_name(crtc->pipe));
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
- WARN(plls->spll_refcount, "SPLL enabled\n");
- WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
- WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
+ WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
+ WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
+ WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
- WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
- "CPU PWM2 enabled\n");
+ if (IS_HASWELL(dev))
+ WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ "CPU PWM2 enabled\n");
WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
@@ -7265,7 +7370,17 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
* gen-specific and since we only disable LCPLL after we fully disable
* the interrupts, the check below should be enough.
*/
- WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
+ WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+}
+
+static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (IS_HASWELL(dev))
+ return I915_READ(D_COMP_HSW);
+ else
+ return I915_READ(D_COMP_BDW);
}
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
@@ -7276,12 +7391,12 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
val))
- DRM_ERROR("Failed to disable D_COMP\n");
+ DRM_ERROR("Failed to write to D_COMP\n");
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
- I915_WRITE(D_COMP, val);
+ I915_WRITE(D_COMP_BDW, val);
+ POSTING_READ(D_COMP_BDW);
}
- POSTING_READ(D_COMP);
}
/*
@@ -7319,12 +7434,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
DRM_ERROR("LCPLL still locked\n");
- val = I915_READ(D_COMP);
+ val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
ndelay(100);
- if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
+ 1))
DRM_ERROR("D_COMP RCOMP still in progress\n");
if (allow_power_down) {
@@ -7373,7 +7489,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
POSTING_READ(LCPLL_CTL);
}
- val = I915_READ(D_COMP);
+ val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
@@ -7479,13 +7595,59 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
if (!intel_ddi_pll_select(intel_crtc))
return -EINVAL;
- intel_ddi_pll_enable(intel_crtc);
intel_crtc->lowfreq_avail = false;
return 0;
}
+static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll;
+ enum port port;
+ uint32_t tmp;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+
+ port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
+
+ pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+
+ switch (pipe_config->ddi_pll_sel) {
+ case PORT_CLK_SEL_WRPLL1:
+ pipe_config->shared_dpll = DPLL_ID_WRPLL1;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ pipe_config->shared_dpll = DPLL_ID_WRPLL2;
+ break;
+ }
+
+ if (pipe_config->shared_dpll >= 0) {
+ pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
+
+ WARN_ON(!pll->get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
+ }
+
+ /*
+ * Haswell has only FDI/PCH transcoder A, which is connected to DDI E.
+ * So just check whether this pipe is wired to DDI E and whether
+ * the PCH transcoder is on.
+ */
+ if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+ pipe_config->has_pch_encoder = true;
+
+ tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+ pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+ FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+ ironlake_get_fdi_m_n_config(crtc, pipe_config);
+ }
+}
+
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -7531,22 +7693,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (!(tmp & PIPECONF_ENABLE))
return false;
- /*
- * Haswell has only FDI/PCH transcoder A. It is which is connected to
- * DDI E. So just check whether this pipe is wired to DDI E and whether
- * the PCH transcoder is on.
- */
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
- if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
- I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
- pipe_config->has_pch_encoder = true;
-
- tmp = I915_READ(FDI_RX_CTL(PIPE_A));
- pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
- FDI_DP_PORT_WIDTH_SHIFT) + 1;
-
- ironlake_get_fdi_m_n_config(crtc, pipe_config);
- }
+ haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
@@ -7991,8 +8138,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int x = intel_crtc->cursor_x;
- int y = intel_crtc->cursor_y;
+ int x = crtc->cursor_x;
+ int y = crtc->cursor_y;
u32 base = 0, pos = 0;
if (on)
@@ -8036,21 +8183,27 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
intel_crtc->cursor_base = base;
}
-static int intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file,
- uint32_t handle,
- uint32_t width, uint32_t height)
+/*
+ * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
+ *
+ * Note that the object's reference will be consumed if the update fails. If
+ * the update succeeds, the reference of the old object (if any) will be
+ * consumed.
+ */
+static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
+ struct drm_i915_gem_object *obj,
+ uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj;
+ enum pipe pipe = intel_crtc->pipe;
unsigned old_width;
uint32_t addr;
int ret;
/* if we want to turn off the cursor ignore width and height */
- if (!handle) {
+ if (!obj) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
obj = NULL;
@@ -8066,12 +8219,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
- if (&obj->base == NULL)
- return -ENOENT;
-
if (obj->base.size < width * height * 4) {
- DRM_DEBUG_KMS("buffer is to small\n");
+ DRM_DEBUG_KMS("buffer is too small\n");
ret = -ENOMEM;
goto fail;
}
@@ -8126,9 +8275,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (intel_crtc->cursor_bo) {
if (!INTEL_INFO(dev)->cursor_needs_physical)
i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
- drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
+ i915_gem_track_fb(intel_crtc->cursor_bo, obj,
+ INTEL_FRONTBUFFER_CURSOR(pipe));
mutex_unlock(&dev->struct_mutex);
old_width = intel_crtc->cursor_width;
@@ -8144,6 +8294,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
}
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+
return 0;
fail_unpin:
i915_gem_object_unpin_from_display_plane(obj);
@@ -8154,19 +8306,6 @@ fail:
return ret;
}
-static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
- intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
-
- if (intel_crtc->active)
- intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
-
- return 0;
-}
-
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t start, uint32_t size)
{
@@ -8242,7 +8381,7 @@ static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
- return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
+ return PAGE_ALIGN(pitch * mode->vdisplay);
}
static struct drm_framebuffer *
@@ -8667,16 +8806,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void intel_increase_pllclock(struct drm_crtc *crtc)
+static void intel_increase_pllclock(struct drm_device *dev,
+ enum pipe pipe)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
int dpll;
- if (HAS_PCH_SPLIT(dev))
+ if (!HAS_GMCH_DISPLAY(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -8704,7 +8841,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (HAS_PCH_SPLIT(dev))
+ if (!HAS_GMCH_DISPLAY(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -8773,28 +8910,179 @@ out:
intel_runtime_pm_put(dev_priv);
}
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+
+/**
+ * intel_mark_fb_busy - mark given planes as busy
+ * @dev: DRM device
+ * @frontbuffer_bits: bits for the affected planes
+ * @ring: optional ring for asynchronous commands
+ *
+ * This function gets called every time the screen contents change. It can be
+ * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
+ */
+static void intel_mark_fb_busy(struct drm_device *dev,
+ unsigned frontbuffer_bits,
+ struct intel_engine_cs *ring)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_crtc *crtc;
+ enum pipe pipe;
if (!i915.powersave)
return;
- for_each_crtc(dev, crtc) {
- if (!crtc->primary->fb)
- continue;
-
- if (to_intel_framebuffer(crtc->primary->fb)->obj != obj)
+ for_each_pipe(pipe) {
+ if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
continue;
- intel_increase_pllclock(crtc);
+ intel_increase_pllclock(dev, pipe);
if (ring && intel_fbc_enabled(dev))
ring->fbc_dirty = true;
}
}
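
The per-pipe masking above works because frontbuffer bits are grouped by
pipe, four bits per pipe (primary, cursor, sprite, overlay). A sketch of
that grouping; the macro names mirror the driver's INTEL_FRONTBUFFER_*
helpers, but the definitions here are illustrative:

#define FB_BITS_PER_PIPE	4
#define FB_PRIMARY(pipe)	(1u << (FB_BITS_PER_PIPE * (pipe)))
#define FB_CURSOR(pipe)		(1u << (FB_BITS_PER_PIPE * (pipe) + 1))
#define FB_ALL_MASK(pipe)	(0xfu << (FB_BITS_PER_PIPE * (pipe)))

/* A cursor update on pipe B wakes up only pipe B:
 *   FB_CURSOR(1) & FB_ALL_MASK(0) == 0	(pipe A untouched)
 *   FB_CURSOR(1) & FB_ALL_MASK(1) != 0	(pipe B marked busy)
 */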
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @ring: set for asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. If @ring is non-NULL, any subsequent flush will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ if (ring) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.busy_bits
+ |= obj->frontbuffer_bits;
+ dev_priv->fb_tracking.flip_bits
+ &= ~obj->frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
+
+ intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Delay flushing when rings are still busy. */
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+
+ intel_edp_psr_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+ bool retire)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned frontbuffer_bits;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ frontbuffer_bits = obj->frontbuffer_bits;
+
+ if (retire) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Filter out new bits since rendering started. */
+ frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+
+ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the given planes. The
+ * actual frontbuffer flushing will be delayed until completion is signalled
+ * with intel_frontbuffer_flip_complete. If an invalidate happens in between,
+ * this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.flip_bits
+ |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Mask any cancelled flips. */
+ frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+ dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
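
Taken together, invalidate/flush/flip_prepare/flip_complete form a small
state machine over two bitmasks: busy_bits for outstanding rendering and
flip_bits for scheduled flips. A compact model of that bookkeeping with
the locking elided (a sketch of the logic above, not the driver's code):

struct fb_tracking {
	unsigned busy_bits;	/* planes with outstanding rendering */
	unsigned flip_bits;	/* planes with a flip scheduled */
};

static void model_invalidate(struct fb_tracking *t, unsigned bits)
{
	t->busy_bits |= bits;	/* delay flushes until rendering retires */
	t->flip_bits &= ~bits;	/* an invalidate cancels a pending flip */
}

static unsigned model_flush(struct fb_tracking *t, unsigned bits)
{
	return bits & ~t->busy_bits;	/* only idle planes flush now */
}

static unsigned model_retire(struct fb_tracking *t, unsigned bits)
{
	bits &= t->busy_bits;	/* ignore bits set after rendering began */
	t->busy_bits &= ~bits;
	return bits;		/* now eligible for flushing */
}

static void model_flip_prepare(struct fb_tracking *t, unsigned bits)
{
	t->flip_bits |= bits;
}

static unsigned model_flip_complete(struct fb_tracking *t, unsigned bits)
{
	bits &= t->flip_bits;	/* mask out flips cancelled in between */
	t->flip_bits &= ~bits;
	return bits;		/* these planes get flushed */
}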
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -8812,8 +9100,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(work);
}
- intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
-
drm_crtc_cleanup(crtc);
kfree(intel_crtc);
@@ -8824,6 +9110,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
struct drm_device *dev = work->crtc->dev;
+ enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb_obj);
@@ -8833,6 +9120,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
+ intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
@@ -9202,6 +9491,150 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
return 0;
}
+static bool use_mmio_flip(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *obj)
+{
+ /*
+ * MMIO flips are not used on older platforms: the lack of a flip
+ * done interrupt forces us to use CS flips there, and those
+ * platforms derive flip completion with some clever tricks
+ * involving the flip_pending status bits and vblank irqs, which
+ * MMIO flips would disrupt.
+ */
+
+ if (ring == NULL)
+ return true;
+
+ if (INTEL_INFO(ring->dev)->gen < 5)
+ return false;
+
+ if (i915.use_mmio_flip < 0)
+ return false;
+ else if (i915.use_mmio_flip > 0)
+ return true;
+ else
+ return ring != obj->ring;
+}
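
Since i915.use_mmio_flip is a tri-state module parameter, the branch
order above matters; the same logic condensed into a table (a
restatement of the code, not new behaviour):

/*
 *   ring == NULL            -> MMIO flip (no CS ring to flip on)
 *   gen < 5                 -> CS flip   (no flip-done interrupt path)
 *   i915.use_mmio_flip < 0  -> CS flip   (forced off)
 *   i915.use_mmio_flip > 0  -> MMIO flip (forced on)
 *   otherwise               -> MMIO flip iff ring != obj->ring
 */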
+
+static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_framebuffer *intel_fb =
+ to_intel_framebuffer(intel_crtc->base.primary->fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ u32 dspcntr;
+ u32 reg;
+
+ intel_mark_page_flip_active(intel_crtc);
+
+ reg = DSPCNTR(intel_crtc->plane);
+ dspcntr = I915_READ(reg);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+ }
+ I915_WRITE(reg, dspcntr);
+
+ I915_WRITE(DSPSURF(intel_crtc->plane),
+ intel_crtc->unpin_work->gtt_offset);
+ POSTING_READ(DSPSURF(intel_crtc->plane));
+}
+
+static int intel_postpone_flip(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *ring;
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ if (!obj->last_write_seqno)
+ return 0;
+
+ ring = obj->ring;
+
+ if (i915_seqno_passed(ring->get_seqno(ring, true),
+ obj->last_write_seqno))
+ return 0;
+
+ ret = i915_gem_check_olr(ring, obj->last_write_seqno);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(!ring->irq_get(ring)))
+ return 0;
+
+ return 1;
+}
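
The i915_seqno_passed() check above relies on the standard
wraparound-safe sequence comparison. A self-contained sketch of that
idiom (an illustration, not the driver's helper):

#include <stdint.h>
#include <stdbool.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* true iff seq1 is at or after seq2, even across a u32 wrap */
	return (int32_t)(seq1 - seq2) >= 0;
}

/* seqno_passed(0x00000002, 0xfffffffe) == true: 2 is four steps past
 * 0xfffffffe despite the numeric wrap. */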
+
+void intel_notify_mmio_flip(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct intel_crtc *intel_crtc;
+ unsigned long irq_flags;
+ u32 seqno;
+
+ seqno = ring->get_seqno(ring, false);
+
+ spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+ for_each_intel_crtc(ring->dev, intel_crtc) {
+ struct intel_mmio_flip *mmio_flip;
+
+ mmio_flip = &intel_crtc->mmio_flip;
+ if (mmio_flip->seqno == 0)
+ continue;
+
+ if (ring->id != mmio_flip->ring_id)
+ continue;
+
+ if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
+ intel_do_mmio_flip(intel_crtc);
+ mmio_flip->seqno = 0;
+ ring->irq_put(ring);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+}
+
+static int intel_queue_mmio_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long irq_flags;
+ int ret;
+
+ if (WARN_ON(intel_crtc->mmio_flip.seqno))
+ return -EBUSY;
+
+ ret = intel_postpone_flip(obj);
+ if (ret < 0)
+ return ret;
+ if (ret == 0) {
+ intel_do_mmio_flip(intel_crtc);
+ return 0;
+ }
+
+ spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+ intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+ intel_crtc->mmio_flip.ring_id = obj->ring->id;
+ spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+
+ /*
+ * Double check to catch cases where the irq fired before the
+ * mmio flip data was ready.
+ */
+ intel_notify_mmio_flip(obj->ring);
+ return 0;
+}
+
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -9220,13 +9653,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
unsigned long flags;
int ret;
+ /*
+ * drm_mode_page_flip_ioctl() should already catch this, but double
+ * check to be safe. In the future we may enable pageflipping from
+ * a disabled primary plane.
+ */
+ if (WARN_ON(intel_fb_obj(old_fb) == NULL))
+ return -EBUSY;
+
/* Can't change pixel format via MI display flips. */
if (fb->pixel_format != crtc->primary->fb->pixel_format)
return -EINVAL;
@@ -9249,7 +9691,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
- work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
+ work->old_fb_obj = intel_fb_obj(old_fb);
INIT_WORK(&work->work, intel_unpin_work_fn);
ret = drm_crtc_vblank_get(crtc);
@@ -9290,10 +9732,15 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
- work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1;
+ work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
if (IS_VALLEYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
+ if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
+ /* vlv: DISPLAY_FLIP fails to change tiling */
+ ring = NULL;
+ } else if (IS_IVYBRIDGE(dev)) {
+ ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
ring = obj->ring;
if (ring == NULL || ring->id != RCS)
@@ -9309,12 +9756,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->gtt_offset =
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags);
+ if (use_mmio_flip(ring, obj))
+ ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
+ page_flip_flags);
+ else
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+ page_flip_flags);
if (ret)
goto cleanup_unpin;
+ i915_gem_track_fb(work->old_fb_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
+
intel_disable_fbc(dev);
- intel_mark_fb_busy(obj, NULL);
+ intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
@@ -9344,7 +9799,7 @@ out_hang:
intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
if (ret == 0 && event)
- drm_send_vblank_event(dev, intel_crtc->pipe, event);
+ drm_send_vblank_event(dev, pipe, event);
}
return ret;
}
@@ -10017,11 +10472,14 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(double_wide);
+ PIPE_CONF_CHECK_X(ddi_pll_sel);
+
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
@@ -10083,6 +10541,14 @@ check_encoder_state(struct drm_device *dev)
if (connector->base.dpms != DRM_MODE_DPMS_OFF)
active = true;
}
+ /*
+ * For MST connectors the connector goes away on unplug,
+ * but the encoder is still connected to a crtc until a
+ * modeset happens in response to the hotplug.
+ */
+ if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
+ continue;
+
WARN(!!encoder->base.crtc != enabled,
"encoder's enabled state mismatch "
"(expected %i, found %i)\n",
@@ -10378,20 +10844,23 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- struct drm_framebuffer *old_fb;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(fb)->obj,
+ obj,
NULL);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
mutex_unlock(&dev->struct_mutex);
goto done;
}
- old_fb = crtc->primary->fb;
if (old_fb)
- intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ intel_unpin_fb_obj(old_obj);
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
mutex_unlock(&dev->struct_mutex);
crtc->primary->fb = fb;
@@ -10563,12 +11032,17 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
if (is_crtc_connector_off(set)) {
config->mode_changed = true;
} else if (set->crtc->primary->fb != set->fb) {
- /* If we have no fb then treat it as a full mode set */
+ /*
+ * If we have no fb, we can only flip as long as the crtc is
+ * active, otherwise we need a full mode set. The crtc may
+ * be active if we've only disabled the primary plane, or
+ * in fastboot situations.
+ */
if (set->crtc->primary->fb == NULL) {
struct intel_crtc *intel_crtc =
to_intel_crtc(set->crtc);
- if (intel_crtc->active && i915.fastboot) {
+ if (intel_crtc->active) {
DRM_DEBUG_KMS("crtc has no fb, will flip\n");
config->fb_changed = true;
} else {
@@ -10620,7 +11094,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
* for them. */
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == &connector->base) {
- connector->new_encoder = connector->encoder;
+ connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
break;
}
}
@@ -10666,7 +11140,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
new_crtc)) {
return -EINVAL;
}
- connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+ connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
@@ -10700,7 +11174,12 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
}
/* Now we've also updated encoder->new_crtc for all encoders. */
-
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->new_encoder)
+ if (connector->new_encoder != connector->encoder)
+ connector->encoder = connector->new_encoder;
+ }
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
@@ -10806,10 +11285,24 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
ret = intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb);
} else if (config->fb_changed) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
+
intel_crtc_wait_for_pending_flips(set->crtc);
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
+
+ /*
+ * We need to make sure the primary plane is re-enabled if it
+ * has previously been turned off.
+ */
+ if (!intel_crtc->primary_enabled && ret == 0) {
+ WARN_ON(!intel_crtc->active);
+ intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
+ intel_crtc->pipe);
+ }
+
/*
* In the fastboot case this may be our only check of the
* state after boot. It would be better to only do it on
@@ -10850,26 +11343,21 @@ out_config:
}
static const struct drm_crtc_funcs intel_crtc_funcs = {
- .cursor_set = intel_crtc_cursor_set,
- .cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
.set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
};
-static void intel_cpu_pll_init(struct drm_device *dev)
-{
- if (HAS_DDI(dev))
- intel_ddi_pll_init(dev);
-}
-
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
+ if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
val = I915_READ(PCH_DPLL(pll->id));
hw_state->dpll = val;
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
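
The early return added above is the general readout-guard pattern: check
the power domain first and report the block as off rather than reading a
powered-down register. Restated (an editor's condensation of this diff):

/*
 *   if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
 *           return false;       (treat the PLL as off, don't fault)
 *
 * Boot-time readout pairs this with an
 * intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS) for every PLL
 * found with a non-zero refcount (see the readout hunk further below).
 */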
@@ -10951,7 +11439,9 @@ static void intel_shared_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ if (HAS_DDI(dev))
+ intel_ddi_pll_init(dev);
+ else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ibx_pch_dpll_init(dev);
else
dev_priv->num_shared_dpll = 0;
@@ -10959,17 +11449,328 @@ static void intel_shared_dpll_init(struct drm_device *dev)
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
+static int
+intel_primary_plane_disable(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_crtc *intel_crtc;
+
+ if (!plane->fb)
+ return 0;
+
+ BUG_ON(!plane->crtc);
+
+ intel_crtc = to_intel_crtc(plane->crtc);
+
+ /*
+ * Even though we checked plane->fb above, it's still possible that
+ * the primary plane has been implicitly disabled because the crtc
+ * coordinates given weren't visible, or because we detected
+ * that it was 100% covered by a sprite plane. Or, the CRTC may be
+ * off and we've set a fb, but haven't actually turned on the CRTC yet.
+ * In any of these cases, we need to unpin the FB and let the fb pointer get
+ * updated, but otherwise we don't need to touch the hardware.
+ */
+ if (!intel_crtc->primary_enabled)
+ goto disable_unpin;
+
+ intel_crtc_wait_for_pending_flips(plane->crtc);
+ intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
+ intel_plane->pipe);
+disable_unpin:
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+ intel_unpin_fb_obj(intel_fb_obj(plane->fb));
+ mutex_unlock(&dev->struct_mutex);
+ plane->fb = NULL;
+
+ return 0;
+}
+
+static int
+intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+ struct drm_rect dest = {
+ /* integer pixels */
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ struct drm_rect src = {
+ /* 16.16 fixed point */
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ const struct drm_rect clip = {
+ /* integer pixels */
+ .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+ .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
+ };
+ bool visible;
+ int ret;
+
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true, &visible);
+
+ if (ret)
+ return ret;
+
+ /*
+ * If the CRTC isn't enabled, we're just pinning the framebuffer,
+ * updating the fb pointer, and returning without touching the
+ * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
+ * turn on the display with all planes setup as desired.
+ */
+ if (!crtc->enabled) {
+ mutex_lock(&dev->struct_mutex);
+
+ /*
+ * If we already called setplane while the crtc was disabled,
+ * we may have an fb pinned; unpin it.
+ */
+ if (plane->fb)
+ intel_unpin_fb_obj(old_obj);
+
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+
+ /* Pin and return without programming hardware */
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+ }
+
+ intel_crtc_wait_for_pending_flips(crtc);
+
+ /*
+ * If clipping results in a non-visible primary plane, we'll disable
+ * the primary plane. Note that this is a bit different from what
+ * happens if userspace explicitly disables the plane by passing fb=0,
+ * because plane->fb still gets set and pinned.
+ */
+ if (!visible) {
+ mutex_lock(&dev->struct_mutex);
+
+ /*
+ * Try to pin the new fb first so that we can bail out if we
+ * fail.
+ */
+ if (plane->fb != fb) {
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+
+ if (intel_crtc->primary_enabled)
+ intel_disable_primary_hw_plane(dev_priv,
+ intel_plane->plane,
+ intel_plane->pipe);
+
+ if (plane->fb != fb)
+ if (plane->fb)
+ intel_unpin_fb_obj(old_obj);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
+
+ ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
+ if (ret)
+ return ret;
+
+ if (!intel_crtc->primary_enabled)
+ intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
+ intel_crtc->pipe);
+
+ return 0;
+}
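
The visibility decision above comes down to clipping the destination
rectangle against the pipe source size; with an inactive crtc the clip
rectangle is empty, so the plane always ends up invisible. A minimal
model of that check (a sketch, not drm_plane_helper_check_update()):

#include <stdbool.h>

struct rect { int x1, y1, x2, y2; };

static bool model_clip_visible(struct rect dst, const struct rect clip)
{
	if (dst.x1 < clip.x1) dst.x1 = clip.x1;
	if (dst.y1 < clip.y1) dst.y1 = clip.y1;
	if (dst.x2 > clip.x2) dst.x2 = clip.x2;
	if (dst.y2 > clip.y2) dst.y2 = clip.y2;
	return dst.x1 < dst.x2 && dst.y1 < dst.y2;
}

/* With an inactive crtc, clip is {0, 0, 0, 0} and nothing is visible. */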
+
+/* Common destruction function for both primary and cursor planes */
+static void intel_plane_destroy(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(intel_plane);
+}
+
+static const struct drm_plane_funcs intel_primary_plane_funcs = {
+ .update_plane = intel_primary_plane_setplane,
+ .disable_plane = intel_primary_plane_disable,
+ .destroy = intel_plane_destroy,
+};
+
+static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
+ int pipe)
+{
+ struct intel_plane *primary;
+ const uint32_t *intel_primary_formats;
+ int num_formats;
+
+ primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+ if (primary == NULL)
+ return NULL;
+
+ primary->can_scale = false;
+ primary->max_downscale = 1;
+ primary->pipe = pipe;
+ primary->plane = pipe;
+ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
+ primary->plane = !pipe;
+
+ if (INTEL_INFO(dev)->gen <= 3) {
+ intel_primary_formats = intel_primary_formats_gen2;
+ num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
+ } else {
+ intel_primary_formats = intel_primary_formats_gen4;
+ num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
+ }
+
+ drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_primary_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY);
+ return &primary->base;
+}
+
+static int
+intel_cursor_plane_disable(struct drm_plane *plane)
+{
+ if (!plane->fb)
+ return 0;
+
+ BUG_ON(!plane->crtc);
+
+ return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
+}
+
+static int
+intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_rect dest = {
+ /* integer pixels */
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ struct drm_rect src = {
+ /* 16.16 fixed point */
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ const struct drm_rect clip = {
+ /* integer pixels */
+ .x2 = intel_crtc->config.pipe_src_w,
+ .y2 = intel_crtc->config.pipe_src_h,
+ };
+ bool visible;
+ int ret;
+
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true, &visible);
+ if (ret)
+ return ret;
+
+ crtc->cursor_x = crtc_x;
+ crtc->cursor_y = crtc_y;
+ if (fb != crtc->cursor->fb) {
+ return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
+ } else {
+ intel_crtc_update_cursor(crtc, visible);
+ return 0;
+ }
+}
+
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+ .update_plane = intel_cursor_plane_update,
+ .disable_plane = intel_cursor_plane_disable,
+ .destroy = intel_plane_destroy,
+};
+
+static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
+ int pipe)
+{
+ struct intel_plane *cursor;
+
+ cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
+ if (cursor == NULL)
+ return NULL;
+
+ cursor->can_scale = false;
+ cursor->max_downscale = 1;
+ cursor->pipe = pipe;
+ cursor->plane = pipe;
+
+ drm_universal_plane_init(dev, &cursor->base, 0,
+ &intel_cursor_plane_funcs,
+ intel_cursor_formats,
+ ARRAY_SIZE(intel_cursor_formats),
+ DRM_PLANE_TYPE_CURSOR);
+ return &cursor->base;
+}
+
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
- int i;
+ struct drm_plane *primary = NULL;
+ struct drm_plane *cursor = NULL;
+ int i, ret;
intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
if (intel_crtc == NULL)
return;
- drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+ primary = intel_primary_plane_create(dev, pipe);
+ if (!primary)
+ goto fail;
+
+ cursor = intel_cursor_plane_create(dev, pipe);
+ if (!cursor)
+ goto fail;
+
+ ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
+ cursor, &intel_crtc_funcs);
+ if (ret)
+ goto fail;
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
for (i = 0; i < 256; i++) {
@@ -10980,7 +11781,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
/*
* On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
- * is hooked to plane B. Hence we want plane A feeding pipe B.
+ * is hooked to pipe B. Hence we want plane A feeding pipe B.
*/
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
@@ -11002,6 +11803,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
+ return;
+
+fail:
+ if (primary)
+ drm_plane_cleanup(primary);
+ if (cursor)
+ drm_plane_cleanup(cursor);
+ kfree(intel_crtc);
}
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
@@ -11021,21 +11830,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
- struct drm_mode_object *drmmode_obj;
+ struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
- DRM_MODE_OBJECT_CRTC);
+ drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
- if (!drmmode_obj) {
+ if (!drmmode_crtc) {
DRM_ERROR("no such CRTC id\n");
return -ENOENT;
}
- crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+ crtc = to_intel_crtc(drmmode_crtc);
pipe_from_crtc_id->pipe = crtc->pipe;
return 0;
@@ -11236,6 +12044,8 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
+ intel_edp_psr_init(dev);
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
@@ -11249,11 +12059,14 @@ static void intel_setup_outputs(struct drm_device *dev)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
+ struct drm_device *dev = fb->dev;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
+ mutex_lock(&dev->struct_mutex);
WARN_ON(!intel_fb->obj->framebuffer_references--);
- drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
+ drm_gem_object_unreference(&intel_fb->obj->base);
+ mutex_unlock(&dev->struct_mutex);
kfree(intel_fb);
}
@@ -11438,7 +12251,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- dev_priv->display.off = haswell_crtc_off;
+ dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
@@ -11591,6 +12404,14 @@ static void quirk_invert_brightness(struct drm_device *dev)
DRM_INFO("applying inverted panel brightness quirk\n");
}
+/* Some VBT's incorrectly indicate no backlight is present */
+static void quirk_backlight_present(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
+ DRM_INFO("applying backlight present quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -11659,6 +12480,15 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 5336 */
{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+ /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+ { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+ /* Toshiba CB35 Chromebook (Celeron 2955U) */
+ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+ /* HP Chromebook 14 (Celeron 2955U) */
+ { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -11705,6 +12535,9 @@ void intel_modeset_init_hw(struct drm_device *dev)
{
intel_prepare_ddi(dev);
+ if (IS_VALLEYVIEW(dev))
+ vlv_update_cdclk(dev);
+
intel_init_clock_gating(dev);
intel_reset_dpio(dev);
@@ -11781,7 +12614,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_dpio(dev);
intel_reset_dpio(dev);
- intel_cpu_pll_init(dev);
intel_shared_dpll_init(dev);
/* Just disable it once at startup */
@@ -11897,6 +12729,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* ... */
plane = crtc->plane;
crtc->plane = !plane;
+ crtc->primary_enabled = true;
dev_priv->display.crtc_disable(&crtc->base);
crtc->plane = plane;
@@ -12006,6 +12839,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
encoder->base.base.id,
encoder->base.name);
encoder->disable(encoder);
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
}
encoder->base.crtc = NULL;
encoder->connectors_active = false;
@@ -12090,10 +12925,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active ? "enabled" : "disabled");
}
- /* FIXME: Smash this into the new shared dpll infrastructure. */
- if (HAS_DDI(dev))
- intel_ddi_setup_hw_pll_state(dev);
-
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
@@ -12107,6 +12938,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
pll->name, pll->refcount, pll->on);
+
+ if (pll->refcount)
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -12224,7 +13058,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_crtc *c;
- struct intel_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
mutex_lock(&dev->struct_mutex);
intel_init_gt_powersave(dev);
@@ -12241,11 +13075,11 @@ void intel_modeset_gem_init(struct drm_device *dev)
*/
mutex_lock(&dev->struct_mutex);
for_each_crtc(dev, c) {
- if (!c->primary->fb)
+ obj = intel_fb_obj(c->primary->fb);
+ if (obj == NULL)
continue;
- fb = to_intel_framebuffer(c->primary->fb);
- if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
+ if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
@@ -12266,7 +13100,6 @@ void intel_connector_unregister(struct intel_connector *intel_connector)
void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
struct drm_connector *connector;
/*
@@ -12276,6 +13109,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
*/
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
+ dev_priv->pm._irqs_disabled = true;
+
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
@@ -12286,14 +13121,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_unregister_dsm_handler();
- for_each_crtc(dev, crtc) {
- /* Skip inactive CRTCs */
- if (!crtc->primary->fb)
- continue;
-
- intel_increase_pllclock(crtc);
- }
-
intel_disable_fbc(dev);
intel_disable_gt_powersave(dev);
@@ -12461,7 +13288,7 @@ intel_display_capture_error_state(struct drm_device *dev)
error->pipe[i].source = I915_READ(PIPESRC(i));
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 99f033f69189..eb52ecfe14cf 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -28,6 +28,8 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -112,7 +114,7 @@ static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-static int
+int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -336,6 +338,37 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
+/* Reboot notifier handler to shut down panel power, to guarantee the T12
+ timing. This is only applicable when the panel PM state is not tracked. */
+static int edp_notify_handler(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
+ edp_notifier);
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_div;
+ u32 pp_ctrl_reg, pp_div_reg;
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+ if (!is_edp(intel_dp) || code != SYS_RESTART)
+ return 0;
+
+ if (IS_VALLEYVIEW(dev)) {
+ pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+ pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
+ pp_div = I915_READ(pp_div_reg);
+ pp_div &= PP_REFERENCE_DIVIDER_MASK;
+
+ /* 0x1F write to PP_DIV_REG sets max cycle delay */
+ I915_WRITE(pp_div_reg, pp_div | 0x1F);
+ I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
+ msleep(intel_dp->panel_power_cycle_delay);
+ }
+
+ return 0;
+}
+
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -740,12 +773,29 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
- sysfs_remove_link(&intel_connector->base.kdev->kobj,
- intel_dp->aux.ddc.dev.kobj.name);
+ if (!intel_connector->mst_port)
+ sysfs_remove_link(&intel_connector->base.kdev->kobj,
+ intel_dp->aux.ddc.dev.kobj.name);
intel_connector_unregister(intel_connector);
}
static void
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
+{
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+ break;
+ case DP_LINK_BW_2_7:
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+ break;
+ case DP_LINK_BW_5_4:
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+ break;
+ }
+}
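
Each DP bandwidth code here selects a fixed LCPLL-derived clock instead
of a programmable WRPLL; the chosen frequency is half the link bit rate,
since data is clocked on both edges (the half-rate relationship is an
editor's gloss, not stated in this diff):

/*
 *   DP_LINK_BW_1_62 -> 1.62 GHz link -> PORT_CLK_SEL_LCPLL_810  ( 810 MHz)
 *   DP_LINK_BW_2_7  -> 2.70 GHz link -> PORT_CLK_SEL_LCPLL_1350 (1350 MHz)
 *   DP_LINK_BW_5_4  -> 5.40 GHz link -> PORT_CLK_SEL_LCPLL_2700 (2700 MHz)
 */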
+
+static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config, int link_bw)
{
@@ -756,8 +806,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
if (IS_G4X(dev)) {
divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll);
- } else if (IS_HASWELL(dev)) {
- /* Haswell has special-purpose DP DDI clocks. */
} else if (HAS_PCH_SPLIT(dev)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
@@ -873,8 +921,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);
- for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
- for (clock = min_clock; clock <= max_clock; clock++) {
+ for (clock = min_clock; clock <= max_clock; clock++) {
+ for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
@@ -928,7 +976,10 @@ found:
&pipe_config->dp_m2_n2);
}
- intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+ if (HAS_DDI(dev))
+ hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
+ else
+ intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
return true;
}
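
The clock/lane search above compares available link bandwidth against
the mode's requirement. A worked model of the bandwidth side, mirroring
drm_dp_bw_code_to_link_rate() and intel_dp_max_data_rate() (the model_*
names are hypothetical):

static int model_link_rate_khz(int bw_code)
{
	/* 0x06 -> 162000, 0x0a -> 270000, 0x14 -> 540000 */
	return bw_code * 27000;
}

static int model_max_data_rate(int link_clock_khz, int lanes)
{
	/* 8b/10b channel coding: 8 data bits per 10 link bits */
	return (link_clock_khz * lanes * 8) / 10;
}

/* e.g. 2.7 GHz at 4 lanes: (270000 * 4 * 8) / 10 = 864000, which the
 * loop compares against mode_rate before accepting the combination. */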
@@ -1316,8 +1367,6 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
- edp_wait_backlight_off(intel_dp);
-
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(intel_dp);
@@ -1353,6 +1402,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("\n");
+
+ intel_panel_enable_backlight(intel_dp->attached_connector);
+
/*
* If we enable the backlight right away following a panel power
* on, we may see slight flicker as the panel syncs with the eDP
@@ -1367,8 +1419,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
-
- intel_panel_enable_backlight(intel_dp->attached_connector);
}
void intel_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1381,8 +1431,6 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
if (!is_edp(intel_dp))
return;
- intel_panel_disable_backlight(intel_dp->attached_connector);
-
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
@@ -1392,6 +1440,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
intel_dp->last_backlight_off = jiffies;
+
+ edp_wait_backlight_off(intel_dp);
+
+ intel_panel_disable_backlight(intel_dp->attached_connector);
}
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@ -1613,11 +1665,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
-static bool is_edp_psr(struct drm_device *dev)
+static bool is_edp_psr(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return dev_priv->psr.sink_support;
+ return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
@@ -1665,9 +1715,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_vsc_psr psr_vsc;
- if (intel_dp->psr_setup_done)
- return;
-
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
@@ -1679,22 +1726,25 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-
- intel_dp->psr_setup_done = true;
}
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
int precharge = 0x3;
int msg_size = 5; /* Header(4) + Message(1) */
+ bool only_standby = false;
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
/* Enable PSR in sink */
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
else
@@ -1713,18 +1763,24 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+ bool only_standby = false;
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
+ val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
@@ -1742,18 +1798,15 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
- dev_priv->psr.source_ok = false;
+ lockdep_assert_held(&dev_priv->psr.lock);
+ lockdep_assert_held(&dev->struct_mutex);
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
- if (!HAS_PSR(dev)) {
- DRM_DEBUG_KMS("PSR not supported on this platform\n");
- return false;
- }
+ dev_priv->psr.source_ok = false;
- if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
- (dig_port->port != PORT_A)) {
+ if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
return false;
}
@@ -1763,29 +1816,9 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- crtc = dig_port->base.base.crtc;
- if (crtc == NULL) {
- DRM_DEBUG_KMS("crtc not active for PSR\n");
- return false;
- }
-
- intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc_active(crtc)) {
- DRM_DEBUG_KMS("crtc not active for PSR\n");
- return false;
- }
-
- obj = to_intel_framebuffer(crtc->primary->fb)->obj;
- if (obj->tiling_mode != I915_TILING_X ||
- obj->fence_reg == I915_FENCE_REG_NONE) {
- DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
- return false;
- }
-
- if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
- DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
- return false;
- }
+ /* The limitations below don't apply to Broadwell */
+ if (IS_BROADWELL(dev))
+ goto out;
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
@@ -1798,35 +1831,60 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
+ out:
dev_priv->psr.source_ok = true;
return true;
}
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
-
- if (!intel_edp_psr_match_conditions(intel_dp) ||
- intel_edp_is_psr_enabled(dev))
- return;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- /* Setup PSR once */
- intel_edp_psr_setup(intel_dp);
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ WARN_ON(dev_priv->psr.active);
+ lockdep_assert_held(&dev_priv->psr.lock);
/* Enable PSR on the panel */
intel_edp_psr_enable_sink(intel_dp);
/* Enable PSR on the host */
intel_edp_psr_enable_source(intel_dp);
+
+ dev_priv->psr.active = true;
}
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_PSR(dev)) {
+ DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ return;
+ }
- if (intel_edp_psr_match_conditions(intel_dp) &&
- !intel_edp_is_psr_enabled(dev))
- intel_edp_psr_do_enable(intel_dp);
+ if (!is_edp_psr(intel_dp)) {
+ DRM_DEBUG_KMS("PSR not supported by this panel\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (dev_priv->psr.enabled) {
+ DRM_DEBUG_KMS("PSR already in use\n");
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ dev_priv->psr.busy_frontbuffer_bits = 0;
+
+ /* Setup PSR once */
+ intel_edp_psr_setup(intel_dp);
+
+ if (intel_edp_psr_match_conditions(intel_dp))
+ dev_priv->psr.enabled = intel_dp;
+ mutex_unlock(&dev_priv->psr.lock);
}
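
The rework above replaces the one-shot psr_setup_done flag with explicit
state guarded by psr.lock. The fields involved, condensed (an editor's
summary of the code in this diff):

/*
 *   psr.enabled -> the intel_dp that owns PSR, or NULL
 *   psr.active  -> hardware currently has EDP_PSR_ENABLE set
 *   psr.busy_frontbuffer_bits -> planes with rendering outstanding;
 *           while non-zero, (re)enabling is deferred to the delayed
 *           work in intel_edp_psr_work()
 */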
void intel_edp_psr_disable(struct intel_dp *intel_dp)
@@ -1834,36 +1892,136 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!intel_edp_is_psr_enabled(dev))
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
return;
+ }
+
+ if (dev_priv->psr.active) {
+ I915_WRITE(EDP_PSR_CTL(dev),
+ I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+
+ /* Wait till PSR is idle */
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ DRM_ERROR("Timed out waiting for PSR Idle State\n");
- I915_WRITE(EDP_PSR_CTL(dev),
- I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+ dev_priv->psr.active = false;
+ } else {
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ }
- /* Wait till PSR is idle */
- if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
- DRM_ERROR("Timed out waiting for PSR Idle State\n");
+ dev_priv->psr.enabled = NULL;
+ mutex_unlock(&dev_priv->psr.lock);
+
+ cancel_delayed_work_sync(&dev_priv->psr.work);
}
-void intel_edp_psr_update(struct drm_device *dev)
+static void intel_edp_psr_work(struct work_struct *work)
{
- struct intel_encoder *encoder;
- struct intel_dp *intel_dp = NULL;
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.work.work);
+ struct intel_dp *intel_dp;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
- if (encoder->type == INTEL_OUTPUT_EDP) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ mutex_lock(&dev_priv->psr.lock);
+ intel_dp = dev_priv->psr.enabled;
- if (!is_edp_psr(dev))
- return;
+ if (!intel_dp)
+ goto unlock;
- if (!intel_edp_psr_match_conditions(intel_dp))
- intel_edp_psr_disable(intel_dp);
- else
- if (!intel_edp_is_psr_enabled(dev))
- intel_edp_psr_do_enable(intel_dp);
- }
+ /*
+ * The delayed work can race with an invalidate, hence we need to
+ * recheck. Since psr_flush first clears the busy bits and then
+ * reschedules, we won't ever miss a flush when bailing out here.
+ */
+ if (dev_priv->psr.busy_frontbuffer_bits)
+ goto unlock;
+
+ intel_edp_psr_do_enable(intel_dp);
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void intel_edp_psr_do_exit(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->psr.active) {
+ u32 val = I915_READ(EDP_PSR_CTL(dev));
+
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+
+ I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+
+ dev_priv->psr.active = false;
+ }
+}
+
+void intel_edp_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ intel_edp_psr_do_exit(dev);
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+ dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_edp_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+ dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+ /*
+ * On Haswell, sprite plane updates don't result in a PSR invalidating
+ * signal in the hardware, which means we need to fake this manually in
+ * software for all flushes, not just when we've seen a preceding
+ * invalidation through frontbuffer rendering.
+ */
+ if (IS_HASWELL(dev) &&
+ (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
+ intel_edp_psr_do_exit(dev);
+
+ if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+ schedule_delayed_work(&dev_priv->psr.work,
+ msecs_to_jiffies(100));
+ mutex_unlock(&dev_priv->psr.lock);
+}
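Taken together, invalidate and flush implement a small busy-bit protocol: invalidate exits PSR and marks the touched frontbuffers busy, while flush clears those bits and re-arms the delayed re-enable work once everything is idle. A self-contained sketch of the protocol with hypothetical names (the real code additionally resolves the owning pipe):

struct psr_tracker {
	struct mutex lock;
	struct delayed_work work;	/* re-enables PSR when idle */
	unsigned int busy_bits;		/* frontbuffers with pending GPU access */
	bool active;
};

static void psr_tracker_invalidate(struct psr_tracker *t, unsigned int bits)
{
	mutex_lock(&t->lock);
	t->active = false;		/* stands in for the hardware PSR exit */
	t->busy_bits |= bits;
	mutex_unlock(&t->lock);
}

static void psr_tracker_flush(struct psr_tracker *t, unsigned int bits)
{
	mutex_lock(&t->lock);
	t->busy_bits &= ~bits;
	/* re-arm only once every tracked frontbuffer went idle */
	if (!t->active && !t->busy_bits)
		schedule_delayed_work(&t->work, msecs_to_jiffies(100));
	mutex_unlock(&t->lock);
}

static void psr_tracker_work(struct work_struct *work)
{
	struct psr_tracker *t = container_of(work, typeof(*t), work.work);

	mutex_lock(&t->lock);
	/* recheck under the lock: an invalidate may have raced with us */
	if (!t->busy_bits)
		t->active = true;	/* stands in for the hardware PSR enable */
	mutex_unlock(&t->lock);
}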
+
+void intel_edp_psr_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
+ mutex_init(&dev_priv->psr.lock);
}
static void intel_disable_dp(struct intel_encoder *encoder)
@@ -2119,6 +2277,70 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
vlv_wait_port_ready(dev_priv, dport);
}
+static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* program left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA1_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA1_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA2_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA2_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ /* program clock channel usage */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+ /*
+ * This is a bit weird since generally CL
+ * matches the pipe, but here we need to
+ * pick the CL based on the port.
+ */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ if (pipe != PIPE_B)
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
+ else
+ val |= CHV_CMN_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
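Every step in this function is the same read-modify-write shape against a DPIO register; a hypothetical helper expressing it once, assuming the vlv_dpio_read/write signatures used above:

static void chv_dpio_rmw(struct drm_i915_private *dev_priv, enum pipe pipe,
			 u32 reg, u32 clear, u32 set)
{
	u32 val = vlv_dpio_read(dev_priv, pipe, reg);

	val &= ~clear;
	val |= set;
	vlv_dpio_write(dev_priv, pipe, reg, val);
}

The clock channel selection would then collapse to one call per PCS register, with the pipe-dependent bit folded into the set mask.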
+
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
@@ -2156,18 +2378,14 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
-/*
- * These are source-specific values; current Intel hardware supports
- * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
- */
-
+/* These are source-specific values. */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
+ if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_1200;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_800;
@@ -2183,18 +2401,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_BROADWELL(dev)) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
- }
- } else if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -2666,41 +2873,6 @@ intel_hsw_signal_levels(uint8_t train_set)
}
}
-static uint32_t
-intel_bdw_signal_levels(uint8_t train_set)
-{
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
- switch (signal_levels) {
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
- return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
-
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
- return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
-
- case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
- case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
-
- case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
-
- default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
- "0x%x\n", signal_levels);
- return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
- }
-}
-
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -2711,10 +2883,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
- if (IS_BROADWELL(dev)) {
- signal_levels = intel_bdw_signal_levels(train_set);
- mask = DDI_BUF_EMP_MASK;
- } else if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
@@ -3213,6 +3382,33 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
edp_panel_vdd_off(intel_dp, false);
}
+static bool
+intel_dp_probe_mst(struct intel_dp *intel_dp)
+{
+ u8 buf[1];
+
+ if (!intel_dp->can_mst)
+ return false;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
+ return false;
+
+ _edp_panel_vdd_on(intel_dp);
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
+ if (buf[0] & DP_MST_CAP) {
+ DRM_DEBUG_KMS("Sink is MST capable\n");
+ intel_dp->is_mst = true;
+ } else {
+ DRM_DEBUG_KMS("Sink is not MST capable\n");
+ intel_dp->is_mst = false;
+ }
+ }
+ edp_panel_vdd_off(intel_dp, false);
+
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ return intel_dp->is_mst;
+}
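Note the probe only reads the capability; actually switching the sink into MST mode is left to drm_dp_mst_topology_mgr_set_mst(), which boils down to a DPCD control write. A hedged sketch of that handshake, illustrating the registers involved rather than the helper's actual code:

static int mst_enable_sketch(struct drm_dp_aux *aux)
{
	u8 ctrl = DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC;

	/* DP_MSTM_CTRL (0x111) flips the branch device into MST mode */
	return drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, ctrl) == 1 ? 0 : -EIO;
}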
+
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -3250,6 +3446,20 @@ intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
sink_irq_vector, 1) == 1;
}
+static bool
+intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+ int ret;
+
+ ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
+ DP_SINK_COUNT_ESI,
+ sink_irq_vector, 14);
+ if (ret != 14)
+ return false;
+
+ return true;
+}
+
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
@@ -3257,6 +3467,63 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
+static int
+intel_dp_check_mst_status(struct intel_dp *intel_dp)
+{
+ bool bret;
+
+ if (intel_dp->is_mst) {
+ u8 esi[16] = { 0 };
+ int ret = 0;
+ int retry;
+ bool handled;
+ bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+go_again:
+ if (bret) {
+ /* check link status - esi[10] = 0x200c */
+ if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ }
+
+ DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+
+ if (handled) {
+ for (retry = 0; retry < 3; retry++) {
+ int wret;
+ wret = drm_dp_dpcd_write(&intel_dp->aux,
+ DP_SINK_COUNT_ESI+1,
+ &esi[1], 3);
+ if (wret == 3)
+ break;
+ }
+
+ bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+ if (bret) {
+ DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ goto go_again;
+ }
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+ } else {
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ /* send a hotplug event */
+ drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
+ }
+ }
+ return -EINVAL;
+}
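For reference, the 14-byte ESI read spans DPCD 0x2002-0x200f, which is where the magic offsets in the loop come from (my annotation, based on the DP 1.2 event status indicator layout):

/*
 * esi[0]    = 0x2002 SINK_COUNT_ESI
 * esi[1..3] = 0x2003-0x2005 service IRQ vectors, acked above by writing
 *             the three bytes back at DP_SINK_COUNT_ESI+1
 * esi[10..] = 0x200c LANE0_1_STATUS_ESI onwards, the link status block
 *             handed to drm_dp_channel_eq_ok(&esi[10], lane_count)
 */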
+
/*
* According to DP spec
* 5.1.2:
@@ -3265,15 +3532,16 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt
*/
-
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
- /* FIXME: This access isn't protected by any locks. */
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
if (!intel_encoder->connectors_active)
return;
@@ -3485,8 +3753,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
struct edid *edid = NULL;
-
- intel_runtime_pm_get(dev_priv);
+ bool ret;
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
@@ -3494,6 +3761,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+ if (intel_dp->is_mst) {
+ /* MST devices are disconnected from a monitor POV */
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ status = connector_status_disconnected;
+ goto out;
+ }
+
intel_dp->has_audio = false;
if (HAS_PCH_SPLIT(dev))
@@ -3506,6 +3781,16 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_dp_probe_oui(intel_dp);
+ ret = intel_dp_probe_mst(intel_dp);
+ if (ret) {
+ /*
+ * If we are in MST mode then this connector won't appear
+ * connected or have anything with EDID on it.
+ */
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ status = connector_status_disconnected;
+ goto out;
+ }
+
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
@@ -3522,9 +3807,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
out:
intel_display_power_put(dev_priv, power_domain);
-
- intel_runtime_pm_put(dev_priv);
-
return status;
}
@@ -3701,12 +3983,17 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
drm_dp_aux_unregister(&intel_dp->aux);
+ intel_dp_mst_encoder_cleanup(intel_dig_port);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (intel_dp->edp_notifier.notifier_call) {
+ unregister_reboot_notifier(&intel_dp->edp_notifier);
+ intel_dp->edp_notifier.notifier_call = NULL;
+ }
}
kfree(intel_dig_port);
}
@@ -3729,12 +4016,64 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
-static void
+void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ return;
+}
+
+bool
+intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+{
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
+ intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
+
+ DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
+ long_hpd ? "long" : "short");
+
+ if (long_hpd) {
+ if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+ goto mst_fail;
- intel_dp_check_link_status(intel_dp);
+ if (!intel_dp_get_dpcd(intel_dp))
+ goto mst_fail;
+
+ intel_dp_probe_oui(intel_dp);
+
+ if (!intel_dp_probe_mst(intel_dp))
+ goto mst_fail;
+
+ } else {
+ if (intel_dp->is_mst) {
+ ret = intel_dp_check_mst_status(intel_dp);
+ if (ret == -EINVAL)
+ goto mst_fail;
+ }
+
+ if (!intel_dp->is_mst) {
+ /*
+ * We'll check the link status via the normal hot plug path later -
+ * but for short HPDs we should check it now.
+ */
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ intel_dp_check_link_status(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ }
+ }
+ return false;
+mst_fail:
+ /* if we were in MST mode and the device is gone, get out of MST mode */
+ if (intel_dp->is_mst) {
+ DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ }
+ return true;
}
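In summary, the pulse handler splits the work by pulse length, and the return value tells the caller whether to fall back to legacy hotplug handling (my reading of the mst_fail exits above):

/*
 * long pulse  -> full re-detect: port presence, DPCD, OUI, MST re-probe;
 *                any failure bails out to mst_fail
 * short pulse -> MST: service the ESI interrupt queue
 *                SST: check link status under connection_mutex
 * mst_fail    -> tear down MST state and return true so the caller runs
 *                the normal single-stream hotplug processing
 */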
/* Return which DP Port should be selected for Transcoder DP control */
@@ -3785,7 +4124,7 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port)
return false;
}
-static void
+void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -3998,6 +4337,11 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
return;
}
+ /*
+ * FIXME: This needs proper synchronization with psr state, but that's
+ * really hard to tell without seeing the users of this code.
+ * Check locking and ordering once that lands.
+ */
if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
return;
@@ -4184,6 +4528,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
mutex_unlock(&dev->mode_config.mutex);
+ if (IS_VALLEYVIEW(dev)) {
+ intel_dp->edp_notifier.notifier_call = edp_notify_handler;
+ register_reboot_notifier(&intel_dp->edp_notifier);
+ }
+
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
@@ -4279,7 +4628,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_aux_init(intel_dp, intel_connector);
- intel_dp->psr_setup_done = false;
+ /* init MST on ports that can support it */
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (port == PORT_B || port == PORT_C || port == PORT_D)
+ intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id);
+ }
if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
drm_dp_aux_unregister(&intel_dp->aux);
@@ -4311,6 +4665,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -4337,6 +4692,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
@@ -4366,9 +4722,55 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_dp_hot_plug;
+ intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ dev_priv->hpd_irq_port[port] = intel_dig_port;
+
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
kfree(intel_connector);
}
}
+
+void intel_dp_mst_suspend(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* disable MST */
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+ if (!intel_dig_port)
+ continue;
+
+ if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+ if (intel_dig_port->dp.is_mst)
+ drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
+ }
+ }
+}
+
+void intel_dp_mst_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+ if (!intel_dig_port)
+ continue;
+ if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ int ret;
+
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+ if (ret != 0)
+ intel_dp_check_mst_status(&intel_dig_port->dp);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
new file mode 100644
index 000000000000..d9a7a7865f66
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ * 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = encoder->base.dev;
+ int bpp;
+ int lane_count, slots;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct intel_connector *found = NULL, *intel_connector;
+ int mst_pbn;
+
+ pipe_config->dp_encoder_is_mst = true;
+ pipe_config->has_pch_encoder = false;
+ pipe_config->has_dp_encoder = true;
+ bpp = 24;
+ /*
+ * For MST we always configure the max link bw - the spec doesn't
+ * seem to suggest we should do otherwise.
+ */
+ lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
+ intel_dp->lane_count = lane_count;
+
+ pipe_config->pipe_bpp = 24;
+ pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+
+ list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+ if (intel_connector->new_encoder == encoder) {
+ found = intel_connector;
+ break;
+ }
+ }
+
+ if (!found) {
+ DRM_ERROR("can't find connector\n");
+ return false;
+ }
+
+ mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+
+ pipe_config->pbn = mst_pbn;
+ slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
+
+ intel_link_compute_m_n(bpp, lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+
+ pipe_config->dp_m_n.tu = slots;
+ return true;
+}
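drm_dp_calc_pbn_mode() converts pixel clock and bpp into the PBN units used for VC payload allocation; PBN counts multiples of 54/64 MBps with a 0.6% margin folded in. A worked example of the arithmetic, as I reconstruct the helper's formula:

/*
 * PBN = ceil(clock_kHz * bpp * 1.006 * 64 / (54 * 8 * 1000))
 *
 * e.g. 1080p@60 (148500 kHz) at bpp = 24:
 *   148500 * 24 = 3564000 kbit/s of pixel data
 *   3564000 * 1.006 * 64 / 432000 ~= 531.2  ->  532 PBN
 */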
+
+static void intel_mst_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ int ret;
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+ drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port);
+
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+ if (ret)
+ DRM_ERROR("failed to update payload %d\n", ret);
+}
+
+static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+ /* this can fail */
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
+ /* and this can also fail */
+ drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+
+ drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port);
+
+ intel_dp->active_mst_links--;
+ intel_mst->port = NULL;
+ if (intel_dp->active_mst_links == 0) {
+ intel_dig_port->base.post_disable(&intel_dig_port->base);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ }
+}
+
+static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ int ret;
+ uint32_t temp;
+ struct intel_connector *found = NULL, *intel_connector;
+ int slots;
+ struct drm_crtc *crtc = encoder->base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+ if (intel_connector->new_encoder == encoder) {
+ found = intel_connector;
+ break;
+ }
+ }
+
+ if (!found) {
+ DRM_ERROR("can't find connector\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+ intel_mst->port = found->port;
+
+ if (intel_dp->active_mst_links == 0) {
+ enum port port = intel_ddi_get_encoder_port(encoder);
+
+ I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel);
+
+ intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
+
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ }
+
+ ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
+ intel_mst->port, intel_crtc->config.pbn, &slots);
+ if (!ret) {
+ DRM_ERROR("failed to allocate vcpi\n");
+ return;
+ }
+
+ intel_dp->active_mst_links++;
+ temp = I915_READ(DP_TP_STATUS(port));
+ I915_WRITE(DP_TP_STATUS(port), temp);
+
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+}
+
+static void intel_mst_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ int ret;
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
+ 1))
+ DRM_ERROR("Timed out waiting for ACT sent\n");
+
+ ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
+
+ ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+}
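The ACT (Allocation Change Trigger) handshake has two halves: the source inserts the trigger and DP_TP_STATUS reports ACT_SENT, then drm_dp_check_act_status() polls the sink until it acknowledges the updated payload table. A rough sketch of the sink-side poll, as an illustration rather than the helper's actual implementation:

static int act_handled_sketch(struct drm_dp_aux *aux)
{
	u8 status;
	int tries;

	for (tries = 0; tries < 30; tries++) {
		if (drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
				      &status) < 0)
			return -EIO;
		if (status & DP_PAYLOAD_ACT_HANDLED)
			return 0;
		usleep_range(100, 200);	/* arbitrary poll interval */
	}
	return -ETIMEDOUT;
}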
+
+static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ *pipe = intel_mst->pipe;
+ if (intel_mst->port)
+ return true;
+ return false;
+}
+
+static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ u32 temp, flags = 0;
+
+ pipe_config->has_dp_encoder = true;
+
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (temp & TRANS_DDI_PHSYNC)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ if (temp & TRANS_DDI_PVSYNC)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ switch (temp & TRANS_DDI_BPC_MASK) {
+ case TRANS_DDI_BPC_6:
+ pipe_config->pipe_bpp = 18;
+ break;
+ case TRANS_DDI_BPC_8:
+ pipe_config->pipe_bpp = 24;
+ break;
+ case TRANS_DDI_BPC_10:
+ pipe_config->pipe_bpp = 30;
+ break;
+ case TRANS_DDI_BPC_12:
+ pipe_config->pipe_bpp = 36;
+ break;
+ default:
+ break;
+ }
+ pipe_config->adjusted_mode.flags |= flags;
+ intel_dp_get_m_n(crtc, pipe_config);
+
+ intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
+}
+
+static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct edid *edid;
+ int ret;
+
+ edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+static enum drm_connector_status
+intel_mst_port_dp_detect(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+
+ return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
+}
+
+static enum drm_connector_status
+intel_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+ enum drm_connector_status status;
+ status = intel_mst_port_dp_detect(connector);
+ return status;
+}
+
+static int
+intel_dp_mst_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void
+intel_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ kfree(intel_connector->edid);
+
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_dp_mst_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_dp_mst_set_property,
+ .destroy = intel_dp_mst_connector_destroy,
+};
+
+static int intel_dp_mst_get_modes(struct drm_connector *connector)
+{
+ return intel_dp_mst_get_ddc_modes(connector);
+}
+
+static enum drm_mode_status
+intel_dp_mst_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* TODO - validate mode against available PBN for link */
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
+ return MODE_OK;
+}
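A hedged sketch of what resolving the TODO might look like, comparing the mode's bandwidth need against whatever PBN the branch device has available (available_pbn is a placeholder parameter, not an existing field):

static enum drm_mode_status
mst_mode_valid_sketch(const struct drm_display_mode *mode, int available_pbn)
{
	/* assume 24 bpp, matching the hardcoded bpp in compute_config */
	int required_pbn = drm_dp_calc_pbn_mode(mode->clock, 24);

	return required_pbn > available_pbn ? MODE_CLOCK_HIGH : MODE_OK;
}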
+
+static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ return &intel_dp->mst_encoders[0]->base.base;
+}
+
+static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
+ .get_modes = intel_dp_mst_get_modes,
+ .mode_valid = intel_dp_mst_mode_valid,
+ .best_encoder = intel_mst_best_encoder,
+};
+
+static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_mst);
+}
+
+static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
+ .destroy = intel_dp_mst_encoder_destroy,
+};
+
+static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
+{
+ if (connector->encoder) {
+ enum pipe pipe;
+ if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
+ return false;
+ return true;
+ }
+ return false;
+}
+
+static void intel_connector_add_to_fbdev(struct intel_connector *connector)
+{
+#ifdef CONFIG_DRM_I915_FBDEV
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
+#endif
+}
+
+static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
+{
+#ifdef CONFIG_DRM_I915_FBDEV
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
+#endif
+}
+
+static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop)
+{
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ int i;
+
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ if (!intel_connector)
+ return NULL;
+
+ connector = &intel_connector->base;
+ drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+ drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
+
+ intel_connector->unregister = intel_connector_unregister;
+ intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ intel_connector->mst_port = intel_dp;
+ intel_connector->port = port;
+
+ for (i = PIPE_A; i <= PIPE_C; i++) {
+ drm_mode_connector_attach_encoder(&intel_connector->base,
+ &intel_dp->mst_encoders[i]->base.base);
+ }
+ intel_dp_add_properties(intel_dp, connector);
+
+ drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+ drm_mode_connector_set_path_property(connector, pathprop);
+ drm_reinit_primary_mode_group(dev);
+ mutex_lock(&dev->mode_config.mutex);
+ intel_connector_add_to_fbdev(intel_connector);
+ mutex_unlock(&dev->mode_config.mutex);
+ drm_connector_register(&intel_connector->base);
+ return connector;
+}
+
+static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
+ /* need to nuke the connector */
+ mutex_lock(&dev->mode_config.mutex);
+ intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ intel_connector->unregister(intel_connector);
+
+ mutex_lock(&dev->mode_config.mutex);
+ intel_connector_remove_from_fbdev(intel_connector);
+ drm_connector_cleanup(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ drm_reinit_primary_mode_group(dev);
+
+ kfree(intel_connector);
+ DRM_DEBUG_KMS("\n");
+}
+
+static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+static struct drm_dp_mst_topology_cbs mst_cbs = {
+ .add_connector = intel_dp_add_mst_connector,
+ .destroy_connector = intel_dp_destroy_mst_connector,
+ .hotplug = intel_dp_mst_hotplug,
+};
+
+static struct intel_dp_mst_encoder *
+intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
+{
+ struct intel_dp_mst_encoder *intel_mst;
+ struct intel_encoder *intel_encoder;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+
+ intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
+
+ if (!intel_mst)
+ return NULL;
+
+ intel_mst->pipe = pipe;
+ intel_encoder = &intel_mst->base;
+ intel_mst->primary = intel_dig_port;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
+ DRM_MODE_ENCODER_DPMST);
+
+ intel_encoder->type = INTEL_OUTPUT_DP_MST;
+ intel_encoder->crtc_mask = 0x7;
+ intel_encoder->cloneable = 0;
+
+ intel_encoder->compute_config = intel_dp_mst_compute_config;
+ intel_encoder->disable = intel_mst_disable_dp;
+ intel_encoder->post_disable = intel_mst_post_disable_dp;
+ intel_encoder->pre_enable = intel_mst_pre_enable_dp;
+ intel_encoder->enable = intel_mst_enable_dp;
+ intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
+ intel_encoder->get_config = intel_dp_mst_enc_get_config;
+
+ return intel_mst;
+}
+
+static bool
+intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
+{
+ int i;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ for (i = PIPE_A; i <= PIPE_C; i++)
+ intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
+ return true;
+}
+
+int
+intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
+{
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ int ret;
+
+ intel_dp->can_mst = true;
+ intel_dp->mst_mgr.cbs = &mst_cbs;
+
+ /* create encoders */
+ intel_dp_create_fake_mst_encoders(intel_dig_port);
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev, &intel_dp->aux, 16, 3, conn_base_id);
+ if (ret) {
+ intel_dp->can_mst = false;
+ return ret;
+ }
+ return 0;
+}
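The bare 16 and 3 in the drm_dp_mst_topology_mgr_init() call map onto the helper's parameters as follows (per its signature at the time of this series):

/*
 * 16           -> max_dpcd_transaction_bytes for sideband messages
 * 3            -> max_payloads: one VC payload slot per pipe A-C
 * conn_base_id -> base connector id the MST connectors are grouped under
 */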
+
+void
+intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ if (!intel_dp->can_mst)
+ return;
+
+ drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
+ /* encoders will get killed by normal cleanup */
+}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index eaa27ee9e367..8a475a6909c3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,7 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_mst_helper.h>
/**
* _wait_for - magic (register) wait macro
@@ -100,6 +100,7 @@
#define INTEL_OUTPUT_EDP 8
#define INTEL_OUTPUT_DSI 9
#define INTEL_OUTPUT_UNKNOWN 10
+#define INTEL_OUTPUT_DP_MST 11
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -165,6 +166,7 @@ struct intel_panel {
struct {
bool present;
u32 level;
+ u32 min;
u32 max;
bool enabled;
bool combination_mode; /* gen 2/4 only */
@@ -207,6 +209,10 @@ struct intel_connector {
/* since POLL and HPD connectors may use the same HPD line keep the native
state of connector->polled in case hotplug storm detection changes it */
u8 polled;
+
+ void *port; /* store this opaque as it's illegal to dereference it */
+
+ struct intel_dp *mst_port;
};
typedef struct dpll {
@@ -307,6 +313,9 @@ struct intel_crtc_config {
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;
+ /* PORT_CLK_SEL for DDI ports. */
+ uint32_t ddi_pll_sel;
+
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
@@ -338,6 +347,7 @@ struct intel_crtc_config {
u32 pos;
u32 size;
bool enabled;
+ bool force_thru;
} pch_pfit;
/* FDI configuration, only valid if has_pch_encoder is set. */
@@ -347,6 +357,9 @@ struct intel_crtc_config {
bool ips_enabled;
bool double_wide;
+
+ bool dp_encoder_is_mst;
+ int pbn;
};
struct intel_pipe_wm {
@@ -358,6 +371,11 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
+struct intel_mmio_flip {
+ u32 seqno;
+ u32 ring_id;
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -384,7 +402,6 @@ struct intel_crtc {
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
- int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
uint32_t cursor_base;
@@ -394,8 +411,6 @@ struct intel_crtc {
struct intel_crtc_config *new_config;
bool new_enabled;
- uint32_t ddi_pll_sel;
-
/* reset counter value when the last flip was submitted */
unsigned int reset_counter;
@@ -412,10 +427,12 @@ struct intel_crtc {
wait_queue_head_t vbl_wait;
int scanline_offset;
+ struct intel_mmio_flip mmio_flip;
};
struct intel_plane_wm_parameters {
uint32_t horiz_pixels;
+ uint32_t vert_pixels;
uint8_t bytes_per_pixel;
bool enabled;
bool scaled;
@@ -428,7 +445,6 @@ struct intel_plane {
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
- u32 lut_r[1024], lut_g[1024], lut_b[1024];
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
@@ -481,6 +497,7 @@ struct cxsr_latency {
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
+#define intel_fb_obj(x) ((x) ? to_intel_framebuffer(x)->obj : NULL)
struct intel_hdmi {
u32 hdmi_reg;
@@ -491,6 +508,7 @@ struct intel_hdmi {
bool has_audio;
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
+ enum hdmi_picture_aspect aspect_ratio;
void (*write_infoframe)(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len);
@@ -499,6 +517,7 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode);
};
+struct intel_dp_mst_encoder;
#define DP_MAX_DOWNSTREAM_PORTS 0x10
/**
@@ -537,10 +556,20 @@ struct intel_dp {
unsigned long last_power_cycle;
unsigned long last_power_on;
unsigned long last_backlight_off;
- bool psr_setup_done;
+
+ struct notifier_block edp_notifier;
+
bool use_tps3;
+ bool can_mst; /* this port supports mst */
+ bool is_mst;
+ int active_mst_links;
+ /* connector directly attached - won't be used for modeset in MST world */
struct intel_connector *attached_connector;
+ /* mst encoders, one per pipe */
+ struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
+ struct drm_dp_mst_topology_mgr mst_mgr;
+
uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
/*
* This function returns the value we have to program the AUX_CTL
@@ -564,6 +593,14 @@ struct intel_digital_port {
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
+ bool (*hpd_pulse)(struct intel_digital_port *, bool);
+};
+
+struct intel_dp_mst_encoder {
+ struct intel_encoder base;
+ enum pipe pipe;
+ struct intel_digital_port *primary;
+ void *port; /* store this opaque as it's illegal to dereference it */
};
static inline int
@@ -650,6 +687,12 @@ enc_to_dig_port(struct drm_encoder *encoder)
return container_of(encoder, struct intel_digital_port, base.base);
}
+static inline struct intel_dp_mst_encoder *
+enc_to_mst(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_dp_mst_encoder, base.base);
+}
+
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
return &enc_to_dig_port(encoder)->dp;
@@ -674,17 +717,26 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
+static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
+{
+ /*
+ * We only use drm_irq_uninstall() at unload and VT switch, so
+ * this is the only thing we need to check.
+ */
+ return !dev_priv->pm._irqs_disabled;
+}
+
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void i9xx_check_fifo_underruns(struct drm_device *dev);
-
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -703,10 +755,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
bool intel_ddi_pll_select(struct intel_crtc *crtc);
-void intel_ddi_pll_enable(struct intel_crtc *crtc);
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -714,17 +763,46 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
+void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
/* intel_display.c */
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
-int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_device *dev);
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring);
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring);
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_frontbuffer_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+/**
+ * intel_frontbuffer_flip - prepare frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the planes given in
+ * @frontbuffer_bits. This is for synchronous plane updates which will happen
+ * on the next vblank and which will not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+static inline
+void intel_frontbuffer_flip(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
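Since a synchronous flip completes on the next vblank with nothing left to render, the flip hook can simply reuse the flush path. A hypothetical call site, for illustration:

/* e.g. right after programming a synchronous primary plane update */
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));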
+
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
+void intel_crtc_control(struct drm_crtc *crtc, bool enable);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
void intel_encoder_destroy(struct drm_encoder *encoder);
void intel_connector_dpms(struct drm_connector *, int mode);
@@ -765,12 +843,18 @@ __intel_framebuffer_create(struct drm_device *dev,
void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+
+/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
+void intel_put_shared_dpll(struct intel_crtc *crtc);
+
+/* modesetting asserts */
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -803,7 +887,6 @@ void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
-int valleyview_get_vco(struct drm_i915_private *dev_priv);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format);
@@ -824,6 +907,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+ bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
@@ -831,11 +916,24 @@ void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
-void intel_edp_psr_update(struct drm_device *dev);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-
+void intel_edp_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_edp_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_edp_psr_init(struct drm_device *dev);
+
+int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
+void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
+void intel_dp_mst_suspend(struct drm_device *dev);
+void intel_dp_mst_resume(struct drm_device *dev);
+int intel_dp_max_link_bw(struct intel_dp *intel_dp);
+void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
+/* intel_dp_mst.c */
+int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
+void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* intel_dsi.c */
-bool intel_dsi_init(struct drm_device *dev);
+void intel_dsi_init(struct drm_device *dev);
/* intel_dvo.c */
@@ -918,8 +1016,8 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc,
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
-void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
- u32 max);
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+ u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
@@ -938,7 +1036,9 @@ int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
+ uint32_t sprite_width,
+ uint32_t sprite_height,
+ int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
@@ -961,6 +1061,7 @@ void intel_init_gt_powersave(struct drm_device *dev);
void intel_cleanup_gt_powersave(struct drm_device *dev);
void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
+void intel_suspend_gt_powersave(struct drm_device *dev);
void intel_reset_gt_powersave(struct drm_device *dev);
void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
@@ -974,8 +1075,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
- enum punit_power_well power_well_id, bool enable);
+
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 7c07ee07a8ee..bfcefbf33709 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -117,17 +117,18 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
/* bandgap reset is needed every time we power gate */
band_gap_reset(dev_priv);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ usleep_range(2500, 3000);
+
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
- usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
- usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
- usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+ usleep_range(2500, 3000);
+
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
- usleep_range(2000, 2500);
+ usleep_range(2500, 3000);
}
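The reworked bring-up thus follows the MIPI ULPS exit ladder, with a 2.5-3 ms settle after each MIPI_DEVICE_READY write:

/*
 * ULPS_STATE_ENTER -> LP_OUTPUT_HOLD -> ULPS_STATE_EXIT -> DEVICE_READY
 */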
static void intel_dsi_enable(struct intel_encoder *encoder)
@@ -271,23 +272,23 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- val = I915_READ(MIPI_PORT_CTRL(pipe));
- I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
- usleep_range(1000, 1500);
-
if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
== 0x00000), 30))
DRM_ERROR("DSI LP not going Low\n");
+ val = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
usleep_range(2000, 2500);
@@ -657,7 +658,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
};
-bool intel_dsi_init(struct drm_device *dev)
+void intel_dsi_init(struct drm_device *dev)
{
struct intel_dsi *intel_dsi;
struct intel_encoder *intel_encoder;
@@ -673,29 +674,29 @@ bool intel_dsi_init(struct drm_device *dev)
/* There is no detection method for MIPI so rely on VBT */
if (!dev_priv->vbt.has_mipi)
- return false;
+ return;
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+ } else {
+ DRM_ERROR("Unsupported Mipi device to reg base");
+ return;
+ }
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
- return false;
+ return;
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dsi);
- return false;
+ return;
}
intel_encoder = &intel_dsi->base;
encoder = &intel_encoder->base;
intel_dsi->attached_connector = intel_connector;
- if (IS_VALLEYVIEW(dev)) {
- dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else {
- DRM_ERROR("Unsupported Mipi device to reg base");
- return false;
- }
-
connector = &intel_connector->base;
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
@@ -753,12 +754,10 @@ bool intel_dsi_init(struct drm_device *dev)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
- return true;
+ return;
err:
drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dsi);
kfree(intel_connector);
-
- return false;
}
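The reordered intel_dsi_init() performs the cheap IS_VALLEYVIEW() platform check before any allocation, so every early return leaves nothing behind and only the err: label has to unwind. A minimal standalone sketch of that ordering, with hypothetical names (ctx_init, platform_supported):

    #include <stdlib.h>

    struct ctx { unsigned long mmio_base; };

    /* Validate resource-free preconditions first; allocate only after
     * they pass, so each early exit needs no cleanup. */
    static struct ctx *ctx_init(int platform_supported, unsigned long base)
    {
            struct ctx *c;

            if (!platform_supported)
                    return NULL;            /* nothing to unwind */

            c = malloc(sizeof(*c));
            if (!c)
                    return NULL;

            c->mmio_base = base;
            return c;
    }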
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index 3eeb21b9fddf..933c86305237 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -404,12 +404,6 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
else
cmd |= DPI_LP_MODE;
- /* DPI virtual channel?! */
-
- mask = DPI_FIFO_EMPTY;
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
- DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
-
/* clear bit */
I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 21a0d348cedc..47c7584a4aa0 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -143,7 +143,7 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
case MIPI_DSI_DCS_LONG_WRITE:
dsi_vc_dcs_write(intel_dsi, vc, data, len);
break;
- };
+ }
data += len;
@@ -294,7 +294,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
intel_dsi->init_count = mipi_config->master_init_timer;
intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
- intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ intel_dsi->video_frmt_cfg_bits =
+ mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
switch (intel_dsi->escape_clk_div) {
case 0:
@@ -351,7 +352,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
*
* prepare count
*/
- ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare);
+ ths_prepare_ns = max(mipi_config->ths_prepare,
+ mipi_config->tclk_prepare);
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
/* exit zero count */
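For context, prepare_cnt converts the larger of the THS/TCLK prepare times into escape-clock units, rounding up. A worked instance with made-up timings (none of these numbers come from a real VBT):

    /* hypothetical: ths_prepare = 65 ns, tclk_prepare = 60 ns,
     * ui_num = 2, ui_den = 1 */
    unsigned int ths_prepare_ns = 65;                 /* max(65, 60) */
    unsigned int prepare_cnt =
            (65 * 1 + (2 * 2 - 1)) / (2 * 2);         /* DIV_ROUND_UP(65, 4) == 17 */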
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 3fb71a04e14f..56b47d2ffaf7 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -112,7 +112,15 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+ if (!(tmp & DVO_ENABLE))
+ return false;
return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 347d16220cd0..f475414671d8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -43,10 +43,36 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+static int intel_fbdev_set_par(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev =
+ container_of(fb_helper, struct intel_fbdev, helper);
+ int ret;
+
+ ret = drm_fb_helper_set_par(info);
+
+ if (ret == 0) {
+ /*
+ * FIXME: fbdev presumes that all callbacks also work from
+ * atomic contexts and relies on that for emergency oops
+ * printing. KMS totally doesn't do that and the locking here is
+ * by far not the only place this goes wrong. Ignore this for
+ * now until we solve this for real.
+ */
+ mutex_lock(&fb_helper->dev->struct_mutex);
+ ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
+ true);
+ mutex_unlock(&fb_helper->dev->struct_mutex);
+ }
+
+ return ret;
+}
+
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ .fb_set_par = intel_fbdev_set_par,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -81,7 +107,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
+ size = PAGE_ALIGN(size);
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, size);
@@ -417,7 +443,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
}
crtcs[i] = new_crtc;
- DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n",
+ DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
connector->name,
pipe_name(to_intel_crtc(encoder->crtc)->pipe),
encoder->crtc->base.id,
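PAGE_ALIGN(size) is simply ALIGN(size, PAGE_SIZE), so that substitution is cosmetic. A simplified restatement of the macros (the kernel's real definitions route through __ALIGN_KERNEL; 4 KiB pages assumed):

    #define ALIGN(x, a)    (((x) + (a) - 1) & ~((a) - 1))   /* a must be a power of two */
    #define PAGE_ALIGN(x)  ALIGN((x), 4096)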
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0b603102cb3b..f9151f6641d9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -367,6 +367,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe frame;
int ret;
+ /* Propagate the user-selected picture aspect ratio (PAR) to the adjusted mode */
+ adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
+
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
adjusted_mode);
if (ret < 0) {
@@ -879,7 +882,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
struct intel_encoder *encoder;
int count = 0, count_hdmi = 0;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return false;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
@@ -1124,6 +1127,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
goto done;
}
+ if (property == connector->dev->mode_config.aspect_ratio_property) {
+ switch (val) {
+ case DRM_MODE_PICTURE_ASPECT_NONE:
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+ break;
+ case DRM_MODE_PICTURE_ASPECT_4_3:
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
+ break;
+ case DRM_MODE_PICTURE_ASPECT_16_9:
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
+ break;
+ default:
+ return -EINVAL;
+ }
+ goto done;
+ }
+
return -EINVAL;
done:
@@ -1229,6 +1249,70 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
}
+static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* program left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA1_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA1_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA2_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA2_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ /* program clock channel usage */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+ /*
+ * This is a bit weird since generally CL
+ * matches the pipe, but here we need to
+ * pick the CL based on the port.
+ */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ if (pipe != PIPE_B)
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
+ else
+ val |= CHV_CMN_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
@@ -1416,11 +1500,22 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
};
static void
+intel_attach_aspect_ratio_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_aspect_ratio_property(connector->dev))
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.aspect_ratio_property,
+ DRM_MODE_PICTURE_ASPECT_NONE);
+}
+
+static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_hdmi->color_range_auto = true;
+ intel_attach_aspect_ratio_property(connector);
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1467,7 +1562,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
- } else if (!HAS_PCH_SPLIT(dev)) {
+ } else if (IS_G4X(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (HAS_DDI(dev)) {
@@ -1528,6 +1623,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
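With the aspect ratio property attached, userspace can request a picture aspect ratio per connector through the generic property interface. A hedged libdrm sketch; fd, connector_id and prop_id are assumed to have been discovered via drmModeGetConnector()/drmModeGetProperty():

    #include <stdint.h>
    #include <xf86drmMode.h>

    /* Ask the driver to advertise 4:3 in the AVI infoframe. */
    static int set_aspect_4_3(int fd, uint32_t connector_id, uint32_t prop_id)
    {
            return drmModeObjectSetProperty(fd, connector_id,
                                            DRM_MODE_OBJECT_CONNECTOR,
                                            prop_id,
                                            DRM_MODE_PICTURE_ASPECT_4_3);
    }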
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d33b61d0dd33..b31088a551f2 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,11 +34,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-enum disp_clk {
- CDCLK,
- CZCLK
-};
-
struct gmbus_port {
const char *name;
int reg;
@@ -63,60 +58,11 @@ to_intel_gmbus(struct i2c_adapter *i2c)
return container_of(i2c, struct intel_gmbus, adapter);
}
-static int get_disp_clk_div(struct drm_i915_private *dev_priv,
- enum disp_clk clk)
-{
- u32 reg_val;
- int clk_ratio;
-
- reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
-
- if (clk == CDCLK)
- clk_ratio =
- ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
- else
- clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
-
- return clk_ratio;
-}
-
-static void gmbus_set_freq(struct drm_i915_private *dev_priv)
-{
- int vco, gmbus_freq = 0, cdclk_div;
-
- BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
-
- vco = valleyview_get_vco(dev_priv);
-
- /* Get the CDCLK divide ratio */
- cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
-
- /*
- * Program the gmbus_freq based on the cdclk frequency.
- * BSpec erroneously claims we should aim for 4MHz, but
- * in fact 1MHz is the correct frequency.
- */
- if (cdclk_div)
- gmbus_freq = (vco << 1) / cdclk_div;
-
- if (WARN_ON(gmbus_freq == 0))
- return;
-
- I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
-}
-
void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /*
- * In BIOS-less system, program the correct gmbus frequency
- * before reading edid.
- */
- if (IS_VALLEYVIEW(dev))
- gmbus_set_freq(dev_priv);
-
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4d29a83fd163..881361c0f27e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,6 +51,7 @@ struct intel_lvds_encoder {
bool is_dual_link;
u32 reg;
+ u32 a3_power;
struct intel_lvds_connector *attached_connector;
};
@@ -71,8 +72,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ enum intel_display_power_domain power_domain;
u32 tmp;
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
tmp = I915_READ(lvds_encoder->reg);
if (!(tmp & LVDS_PORT_EN))
@@ -111,6 +117,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= flags;
+ /* gen2/3 store dither state in pfit control, needs to match */
+ if (INTEL_INFO(dev)->gen < 4) {
+ tmp = I915_READ(PFIT_CONTROL);
+
+ pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ }
+
dotclock = pipe_config->port_clock;
if (HAS_PCH_SPLIT(dev_priv->dev))
@@ -165,8 +178,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
+ * panels behave in the two modes. For now, let's just maintain the
+ * value we got from the BIOS.
*/
+ temp &= ~LVDS_A3_POWER_MASK;
+ temp |= lvds_encoder->a3_power;
/* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
@@ -264,7 +280,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
@@ -279,8 +294,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return false;
}
- if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
- LVDS_A3_POWER_UP)
+ if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
lvds_bpp = 8*3;
else
lvds_bpp = 6*3;
@@ -1081,6 +1095,9 @@ out:
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
+ lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
+ LVDS_A3_POWER_MASK;
+
/*
* Unlock registers and just
* leave them unlocked
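The a3_power handling captures the BIOS-programmed LVDS_A3_POWER field once at init (last hunk above) and re-applies it on every enable: a read-modify-write that preserves one field across otherwise full reprogramming. The pattern, with hypothetical read_reg()/write_reg() accessors:

    /* once, at init: remember what the BIOS chose */
    u32 a3_power = read_reg(lvds_reg) & LVDS_A3_POWER_MASK;

    /* on every enable: clear the field, restore the saved value */
    u32 temp = read_reg(lvds_reg);
    temp &= ~LVDS_A3_POWER_MASK;
    temp |= a3_power;
    write_reg(lvds_reg, temp);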
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 2e2c71fcc9ed..ca52ad2ae7d1 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -352,6 +352,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
case INTEL_OUTPUT_UNKNOWN:
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
+ case INTEL_OUTPUT_DP_MST:
type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
break;
case INTEL_OUTPUT_EDP:
@@ -403,6 +404,15 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+ /*
+ * If the acpi_video interface is not supposed to be used, don't
+ * bother processing backlight level change requests from firmware.
+ */
+ if (!acpi_video_verify_backlight_support()) {
+ DRM_DEBUG_KMS("opregion backlight request ignored\n");
+ return 0;
+ }
+
if (!(bclp & ASLE_BCLP_VALID))
return ASLC_BACKLIGHT_FAILED;
@@ -418,7 +428,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
*/
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
- intel_panel_set_backlight(intel_connector, bclp, 255);
+ intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index daa118978eec..dc2f4f26c961 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -415,6 +415,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
}
intel_overlay_release_old_vid_tail(overlay);
+
+ i915_gem_track_fb(overlay->old_vid_bo, NULL,
+ INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
return 0;
}
@@ -686,6 +690,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
bool scale_changed = false;
struct drm_device *dev = overlay->dev;
u32 swidth, swidthsw, sheight, ostride;
+ enum pipe pipe = overlay->crtc->pipe;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -713,7 +718,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(overlay->dev))
oconfig |= OCONF_CSC_MODE_BT709;
- oconfig |= overlay->crtc->pipe == 0 ?
+ oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
iowrite32(oconfig, &regs->OCONFIG);
intel_overlay_unmap_regs(overlay, regs);
@@ -776,9 +781,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret)
goto out_unpin;
+ i915_gem_track_fb(overlay->vid_bo, new_bo,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = new_bo;
+ intel_frontbuffer_flip(dev,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
return 0;
out_unpin:
@@ -1028,7 +1039,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_intel_overlay_put_image *put_image_rec = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct drm_mode_object *drmmode_obj;
+ struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
struct drm_i915_gem_object *new_bo;
struct put_image_params *params;
@@ -1057,13 +1068,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
if (!params)
return -ENOMEM;
- drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!drmmode_obj) {
+ drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id);
+ if (!drmmode_crtc) {
ret = -ENOENT;
goto out_free;
}
- crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+ crtc = to_intel_crtc(drmmode_crtc);
new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
put_image_rec->bo_handle));
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 38a98570d10c..59b028f0b1e8 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -361,16 +361,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
- /* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
pfit_control = 0;
pfit_pgm_ratios = 0;
}
+ /* Make sure pre-965 set dither correctly for 18bpp panels. */
+ if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
pipe_config->gmch_pfit.control = pfit_control;
pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
pipe_config->gmch_pfit.lvds_border_bits = border;
@@ -398,6 +398,69 @@ intel_panel_detect(struct drm_device *dev)
}
}
+/**
+ * scale - scale values from one range to another
+ *
+ * @source_val: value in range [@source_min..@source_max]
+ * @source_min: minimum of the source range
+ * @source_max: maximum of the source range
+ * @target_min: minimum of the target range
+ * @target_max: maximum of the target range
+ *
+ * Return @source_val in range [@source_min..@source_max] scaled to range
+ * [@target_min..@target_max].
+ */
+static uint32_t scale(uint32_t source_val,
+ uint32_t source_min, uint32_t source_max,
+ uint32_t target_min, uint32_t target_max)
+{
+ uint64_t target_val;
+
+ WARN_ON(source_min > source_max);
+ WARN_ON(target_min > target_max);
+
+ /* defensive */
+ source_val = clamp(source_val, source_min, source_max);
+
+ /* avoid overflows */
+ target_val = (uint64_t)(source_val - source_min) *
+ (target_max - target_min);
+ do_div(target_val, source_max - source_min);
+ target_val += target_min;
+
+ return target_val;
+}
+
+/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
+static inline u32 scale_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(user_level, 0, user_max,
+ panel->backlight.min, panel->backlight.max);
+}
+
+/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
+ * to [hw_min..hw_max]. */
+static inline u32 clamp_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
+ hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
+
+ return hw_level;
+}
+
+/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
+static inline u32 scale_hw_to_user(struct intel_connector *connector,
+ u32 hw_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(hw_level, panel->backlight.min, panel->backlight.max,
+ 0, user_max);
+}
+
static u32 intel_panel_compute_brightness(struct intel_connector *connector,
u32 val)
{
@@ -557,17 +620,16 @@ intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
dev_priv->display.set_backlight(connector, level);
}
-/* set backlight brightness to level in range [0..max] */
-void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
- u32 max)
+/* set backlight brightness to level in range [0..max], scaling wrt hw min */
+static void intel_panel_set_backlight(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
- u32 freq;
+ u32 hw_level;
unsigned long flags;
- u64 n;
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
@@ -576,18 +638,46 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
WARN_ON(panel->backlight.max == 0);
- /* scale to hardware max, but be careful to not overflow */
- freq = panel->backlight.max;
- n = (u64)level * freq;
- do_div(n, max);
- level = n;
+ hw_level = scale_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
+
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(connector, hw_level);
+
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
+
+/* Set backlight brightness to level in range [0..max]. The request is
+ * scaled to [0..hw_max] and then clamped into [hw_min..hw_max], i.e. the
+ * firmware value is trusted to already respect the hw minimum.
+ */
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 hw_level;
+ unsigned long flags;
+
+ if (!panel->backlight.present || pipe == INVALID_PIPE)
+ return;
+
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+ WARN_ON(panel->backlight.max == 0);
+
+ hw_level = clamp_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
- panel->backlight.level = level;
if (panel->backlight.device)
- panel->backlight.device->props.brightness = level;
+ panel->backlight.device->props.brightness =
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
if (panel->backlight.enabled)
- intel_panel_actually_set_backlight(connector, level);
+ intel_panel_actually_set_backlight(connector, hw_level);
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
@@ -860,7 +950,9 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
panel->backlight.level = panel->backlight.max;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
- panel->backlight.level;
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
}
dev_priv->display.enable_backlight(connector);
@@ -889,11 +981,15 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 hw_level;
int ret;
intel_runtime_pm_get(dev_priv);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- ret = intel_panel_get_backlight(connector);
+
+ hw_level = intel_panel_get_backlight(connector);
+ ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
+
drm_modeset_unlock(&dev->mode_config.connection_mutex);
intel_runtime_pm_put(dev_priv);
@@ -913,12 +1009,19 @@ static int intel_backlight_device_register(struct intel_connector *connector)
if (WARN_ON(panel->backlight.device))
return -ENODEV;
- BUG_ON(panel->backlight.max == 0);
+ WARN_ON(panel->backlight.max == 0);
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
- props.brightness = panel->backlight.level;
+
+ /*
+ * Note: Everything should work even if the backlight device max
+ * presented to userspace is arbitrarily chosen.
+ */
props.max_brightness = panel->backlight.max;
+ props.brightness = scale_hw_to_user(connector,
+ panel->backlight.level,
+ props.max_brightness);
/*
* Note: using the same name independent of the connector prevents
@@ -964,6 +1067,19 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
* XXX: Query mode clock or hardware clock and program PWM modulation frequency
* appropriately when it's 0. Use VBT and/or sane defaults.
*/
+static u32 get_backlight_min_vbt(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+
+ WARN_ON(panel->backlight.max == 0);
+
+ /* vbt value is a coefficient in range [0..255] */
+ return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
+ 0, panel->backlight.max);
+}
+
static int bdw_setup_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -979,6 +1095,8 @@ static int bdw_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = bdw_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1003,6 +1121,8 @@ static int pch_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = pch_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1035,6 +1155,8 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = i9xx_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1062,6 +1184,8 @@ static int i965_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = i9xx_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1099,6 +1223,8 @@ static int vlv_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = _vlv_get_backlight(dev, PIPE_A);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1118,8 +1244,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
int ret;
if (!dev_priv->vbt.backlight.present) {
- DRM_DEBUG_KMS("native backlight control not available per VBT\n");
- return 0;
+ if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+ DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
+ } else {
+ DRM_DEBUG_KMS("no backlight present per VBT\n");
+ return 0;
+ }
}
/* set level and max in panel struct */
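The scale()/scale_user_to_hw()/scale_hw_to_user() helpers introduced above are a plain linear range mapping with a defensive clamp and a 64-bit intermediate. A standalone restatement with worked checks; the hw range 25..1250 is invented for illustration:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t scale(uint32_t v, uint32_t smin, uint32_t smax,
                          uint32_t tmin, uint32_t tmax)
    {
            uint64_t t;

            if (v < smin)                   /* defensive clamp, as in the patch */
                    v = smin;
            if (v > smax)
                    v = smax;

            t = (uint64_t)(v - smin) * (tmax - tmin);  /* 64-bit to avoid overflow */
            t /= (smax - smin);                        /* truncating, like do_div() */
            return (uint32_t)(t + tmin);
    }

    int main(void)
    {
            /* hypothetical panel: hw range 25..1250, user range 0..255 */
            assert(scale(0,   0, 255, 25, 1250) == 25);   /* user 0 is hw min, not off */
            assert(scale(255, 0, 255, 25, 1250) == 1250);
            assert(scale(128, 0, 255, 25, 1250) == 639);  /* 25 + 128*1225/255, truncated */
            return 0;
    }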
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ee72807069e4..3f88f29a98c0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -93,8 +93,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int i;
@@ -150,8 +149,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
@@ -222,16 +220,26 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- else
+ break;
+ case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev))
dpfc_ctl |= obj->fence_reg;
@@ -278,16 +286,27 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- else
+ break;
+ case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
+
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -462,7 +481,6 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_crtc *crtc = NULL, *tmp_crtc;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
@@ -507,8 +525,7 @@ void intel_update_fbc(struct drm_device *dev)
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
+ obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config.adjusted_mode;
if (i915.enable_fbc < 0) {
@@ -529,7 +546,10 @@ void intel_update_fbc(struct drm_device *dev)
goto out_disable;
}
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+ max_width = 4096;
+ max_height = 4096;
+ } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
max_width = 4096;
max_height = 2048;
} else {
@@ -563,7 +583,8 @@ void intel_update_fbc(struct drm_device *dev)
if (in_dbg_master())
goto out_disable;
- if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+ if (i915_gem_stolen_setup_compression(dev, obj->base.size,
+ drm_format_plane_cpp(fb->pixel_format, 0))) {
if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
goto out_disable;
@@ -789,12 +810,33 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
return NULL;
}
-static void pineview_disable_cxsr(struct drm_device *dev)
+void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
+ u32 val;
+
+ if (IS_VALLEYVIEW(dev)) {
+ I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+ I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ } else if (IS_PINEVIEW(dev)) {
+ val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
+ val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
+ I915_WRITE(DSPFW3, val);
+ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+ val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+ _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+ I915_WRITE(FW_BLC_SELF, val);
+ } else if (IS_I915GM(dev)) {
+ val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+ _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+ I915_WRITE(INSTPM, val);
+ } else {
+ return;
+ }
- /* deactivate cxsr */
- I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+ DRM_DEBUG_KMS("memory self-refresh is %s\n",
+ enable ? "enabled" : "disabled");
}
/*
@@ -864,95 +906,95 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_HPLLOFF_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE,
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
- G4X_FIFO_SIZE,
- G4X_MAX_WM,
- G4X_MAX_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = G4X_FIFO_SIZE,
+ .max_wm = G4X_MAX_WM,
+ .default_wm = G4X_MAX_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = I965_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
- VALLEYVIEW_FIFO_SIZE,
- VALLEYVIEW_MAX_WM,
- VALLEYVIEW_MAX_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = VALLEYVIEW_FIFO_SIZE,
+ .max_wm = VALLEYVIEW_MAX_WM,
+ .default_wm = VALLEYVIEW_MAX_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
- I965_CURSOR_FIFO,
- VALLEYVIEW_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- I915_FIFO_LINE_SIZE,
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = I965_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
- I945_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
+ .fifo_size = I945_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
- I915_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
+ .fifo_size = I915_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_wm_info = {
- I855GM_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
+ .fifo_size = I855GM_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
- I830_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
+ .fifo_size = I830_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
};
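The table conversion just above is behavior-neutral: designated initializers bind each constant to a named field, so the entries no longer depend on remembering the struct layout. Side by side, using the i915_wm_info values from the patch (the local struct definition with plain unsigned fields is a simplification):

    struct intel_watermark_params {
            unsigned int fifo_size, max_wm, default_wm,
                         guard_size, cacheline_size;
    };

    /* positional: meaning depends on declaration order */
    static const struct intel_watermark_params wm_positional = {
            I915_FIFO_SIZE, I915_MAX_WM, 1, 2, I915_FIFO_LINE_SIZE
    };

    /* designated: self-describing and reorder-safe */
    static const struct intel_watermark_params wm_designated = {
            .fifo_size      = I915_FIFO_SIZE,
            .max_wm         = I915_MAX_WM,
            .default_wm     = 1,
            .guard_size     = 2,
            .cacheline_size = I915_FIFO_LINE_SIZE,
    };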
/**
@@ -1033,7 +1075,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
+ intel_set_memory_cxsr(dev_priv, false);
return;
}
@@ -1084,13 +1126,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
- /* activate cxsr */
- I915_WRITE(DSPFW3,
- I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
- DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ intel_set_memory_cxsr(dev_priv, true);
} else {
- pineview_disable_cxsr(dev);
- DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ intel_set_memory_cxsr(dev_priv, false);
}
}
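Every watermark path converted below follows the same discipline: turn self-refresh off through the new helper before touching the DSPFW registers, and re-enable it only when the computed single-plane watermarks are usable. Condensed shape; compute_sr_wm_ok() stands in for the per-platform computation:

    bool cxsr_enabled = compute_sr_wm_ok(dev);       /* hypothetical predicate */

    if (!cxsr_enabled)
            intel_set_memory_cxsr(dev_priv, false);  /* off before WM writes */

    /* program DSPFW1/DSPFW2/DSPFW3 watermark registers here */

    if (cxsr_enabled)
            intel_set_memory_cxsr(dev_priv, true);   /* safe to re-enable */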
@@ -1316,6 +1354,7 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
int plane_sr, cursor_sr;
int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
+ bool cxsr_enabled;
vlv_update_drain_latency(dev);
@@ -1342,10 +1381,10 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&ignore_plane_sr, &cursor_sr)) {
- I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+ cxsr_enabled = true;
} else {
- I915_WRITE(FW_BLC_SELF_VLV,
- I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+ cxsr_enabled = false;
+ intel_set_memory_cxsr(dev_priv, false);
plane_sr = cursor_sr = 0;
}
@@ -1365,6 +1404,9 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void g4x_update_wm(struct drm_crtc *crtc)
@@ -1375,6 +1417,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
+ bool cxsr_enabled;
if (g4x_compute_wm0(dev, PIPE_A,
&g4x_wm_info, latency_ns,
@@ -1394,10 +1437,10 @@ static void g4x_update_wm(struct drm_crtc *crtc)
&g4x_wm_info,
&g4x_cursor_wm_info,
&plane_sr, &cursor_sr)) {
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ cxsr_enabled = true;
} else {
- I915_WRITE(FW_BLC_SELF,
- I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+ cxsr_enabled = false;
+ intel_set_memory_cxsr(dev_priv, false);
plane_sr = cursor_sr = 0;
}
@@ -1418,6 +1461,9 @@ static void g4x_update_wm(struct drm_crtc *crtc)
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
@@ -1427,6 +1473,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
struct drm_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
+ bool cxsr_enabled;
/* Calc sr entries for one plane configs */
crtc = single_enabled_crtc(dev);
@@ -1468,13 +1515,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ cxsr_enabled = true;
} else {
+ cxsr_enabled = false;
/* Turn off self refresh if both pipes are enabled */
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ intel_set_memory_cxsr(dev_priv, false);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -1486,6 +1531,9 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
@@ -1545,12 +1593,12 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
if (IS_I915GM(dev) && enabled) {
- struct intel_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
- fb = to_intel_framebuffer(enabled->primary->fb);
+ obj = intel_fb_obj(enabled->primary->fb);
/* self-refresh seems busted with untiled */
- if (fb->obj->tiling_mode == I915_TILING_NONE)
+ if (obj->tiling_mode == I915_TILING_NONE)
enabled = NULL;
}
@@ -1560,10 +1608,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
cwm = 2;
/* Play safe and disable self-refresh before adjusting watermarks. */
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
+ intel_set_memory_cxsr(dev_priv, false);
/* Calc sr entries for one plane configs */
if (HAS_FW_BLC(dev) && enabled) {
@@ -1609,17 +1654,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(FW_BLC, fwater_lo);
I915_WRITE(FW_BLC2, fwater_hi);
- if (HAS_FW_BLC(dev)) {
- if (enabled) {
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF,
- FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
- DRM_DEBUG_KMS("memory self refresh enabled\n");
- } else
- DRM_DEBUG_KMS("memory self refresh disabled\n");
- }
+ if (enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
@@ -2707,10 +2743,11 @@ static void ilk_update_wm(struct drm_crtc *crtc)
ilk_write_wm_values(dev_priv, &results);
}
-static void ilk_update_sprite_wm(struct drm_plane *plane,
- struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
- bool enabled, bool scaled)
+static void
+ilk_update_sprite_wm(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ uint32_t sprite_width, uint32_t sprite_height,
+ int pixel_size, bool enabled, bool scaled)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -2718,6 +2755,7 @@ static void ilk_update_sprite_wm(struct drm_plane *plane,
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
+ intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.bytes_per_pixel = pixel_size;
/*
@@ -2852,13 +2890,16 @@ void intel_update_watermarks(struct drm_crtc *crtc)
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
+ uint32_t sprite_width,
+ uint32_t sprite_height,
+ int pixel_size,
bool enabled, bool scaled)
{
struct drm_i915_private *dev_priv = plane->dev->dev_private;
if (dev_priv->display.update_sprite_wm)
- dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+ dev_priv->display.update_sprite_wm(plane, crtc,
+ sprite_width, sprite_height,
pixel_size, enabled, scaled);
}
@@ -3147,6 +3188,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_THRESHOLD;
+ mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
+ mask &= dev_priv->pm_rps_events;
+
/* IVB and SNB hard hangs on looping batchbuffer
* if GEN6_PM_UP_EI_EXPIRED is masked.
*/
@@ -3250,7 +3294,9 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ else if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
@@ -3348,6 +3394,15 @@ static void gen6_disable_rps(struct drm_device *dev)
gen6_disable_rps_interrupts(dev);
}
+static void cherryview_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ gen8_disable_rps_interrupts(dev);
+}
+
static void valleyview_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3419,7 +3474,7 @@ static void gen8_enable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
- bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3430,7 +3485,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
- snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3483,15 +3538,23 @@ static void gen8_enable_rps(struct drm_device *dev)
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+ if (IS_BROADWELL(dev))
+ I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+ else
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
/* 3: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
intel_print_rc6_info(dev, rc6_mask);
- I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_EI_MODE(1) |
- rc6_mask);
+ if (IS_BROADWELL(dev))
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ rc6_mask);
+ else
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ rc6_mask);
/* 4 Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RPNSWREQ,
@@ -3727,7 +3790,57 @@ void gen6_update_ring_freq(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
-int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp0;
+
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+ return rp0;
+}
+
+static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rpe;
+
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
+ rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+
+ return rpe;
+}
+
+static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp1;
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+ return rp1;
+}
+
+static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rpn;
+
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
+ return rpn;
+}
+
+static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp1;
+
+ val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+ return rp1;
+}
+
+static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp0;
@@ -3752,7 +3865,7 @@ static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
return rpe;
}
-int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
+static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
@@ -3766,6 +3879,35 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
dev_priv->vlv_pctx->stolen->start);
}
+
+/* Check that the pcbr address is not empty. */
+static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
+{
+ unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+
+ WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
+}
+
+static void cherryview_setup_pctx(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long pctx_paddr, paddr;
+ struct i915_gtt *gtt = &dev_priv->gtt;
+ u32 pcbr;
+ int pctx_size = 32*1024;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ pcbr = I915_READ(VLV_PCBR);
+ if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+ paddr = (dev_priv->mm.stolen_base +
+ (gtt->stolen_size - pctx_size));
+
+ pctx_paddr = (paddr & (~4095));
+ I915_WRITE(VLV_PCBR, pctx_paddr);
+ }
+}
+
static void valleyview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3840,6 +3982,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
+ dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ dev_priv->rps.rp1_freq);
+
dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
@@ -3855,11 +4002,142 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
+static void cherryview_init_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ cherryview_setup_pctx(dev);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
+ dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ dev_priv->rps.max_freq);
+
+ dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
+
+ dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ dev_priv->rps.rp1_freq);
+
+ dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ dev_priv->rps.min_freq);
+
+ /* Preserve min/max settings in case of re-init */
+ if (dev_priv->rps.max_freq_softlimit == 0)
+ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+ if (dev_priv->rps.min_freq_softlimit == 0)
+ dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
valleyview_cleanup_pctx(dev);
}
+static void cherryview_enable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ u32 gtfifodbg, val, rc6_mode = 0, pcbr;
+ int i;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ gtfifodbg = I915_READ(GTFIFODBG);
+ if (gtfifodbg) {
+ DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+ gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ cherryview_check_pctx(dev_priv);
+
+ /* 1a & 1b: Get forcewake during program sequence. Although the driver
+ * hasn't enabled a state yet where we need forcewake, BIOS may have. */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* 2a: Program RC6 thresholds.*/
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 125000 * 1280ns = 160ms */
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+ for_each_ring(ring, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+ /* allows RC6 residency counter to work */
+ I915_WRITE(VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ /* For now we assume BIOS is allocating and populating the PCBR */
+ pcbr = I915_READ(VLV_PCBR);
+
+ DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
+
+ /* 3: Enable RC6 */
+ if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
+ (pcbr >> VLV_PCBR_ADDR_SHIFT))
+ rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
+ I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+
+ /* 4 Program defaults and thresholds for RPS*/
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+ I915_WRITE(GEN6_RP_UP_EI, 66000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ /* WaDisablePwrmtrEvent:chv (pre-production hw) */
+ I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
+ I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
+
+ /* 5: Enable RPS */
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+ DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ dev_priv->rps.cur_freq);
+
+ DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
+
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+
+ gen8_enable_rps_interrupts(dev);
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
+
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3886,6 +4164,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
@@ -3906,9 +4185,11 @@ static void valleyview_enable_rps(struct drm_device *dev)
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+ VLV_RENDER_RC0_COUNT_EN |
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
+
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
@@ -4668,33 +4949,60 @@ void intel_init_gt_powersave(struct drm_device *dev)
{
i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ cherryview_init_gt_powersave(dev);
+ else if (IS_VALLEYVIEW(dev))
valleyview_init_gt_powersave(dev);
}
void intel_cleanup_gt_powersave(struct drm_device *dev)
{
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ return;
+ else if (IS_VALLEYVIEW(dev))
valleyview_cleanup_gt_powersave(dev);
}
+/**
+ * intel_suspend_gt_powersave - suspend PM work and helper threads
+ * @dev: drm device
+ *
+ * We don't want to disable RC6 or other features here, we just want
+ * to make sure any work we've queued has finished and won't bother
+ * us while we're suspended.
+ */
+void intel_suspend_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Interrupts should be disabled already to avoid re-arming. */
+ WARN_ON(intel_irqs_enabled(dev_priv));
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ cancel_work_sync(&dev_priv->rps.work);
+
+ /* Force GPU to min freq during suspend */
+ gen6_rps_idle(dev_priv);
+}
+
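Note the asymmetry in the helper above: the delayed resume work is flushed rather than cancelled, so an in-flight deferred enable completes before we quiesce, while the RPS interrupt work is cancelled outright. A rough summary of the sequence (assuming, as the WARN_ON documents, that the caller already disabled interrupts):

/* Sketch of the quiesce order, not new driver code:
 *   flush_delayed_work()  - let an in-flight deferred enable finish
 *   cancel_work_sync()    - stop the RPS worker from re-arming
 *   gen6_rps_idle()       - park the GPU at its minimum frequency
 */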
void intel_disable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Interrupts should be disabled already to avoid re-arming. */
- WARN_ON(dev->irq_enabled);
+ WARN_ON(intel_irqs_enabled(dev_priv));
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
- } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
- if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
- intel_runtime_pm_put(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ intel_suspend_gt_powersave(dev);
- cancel_work_sync(&dev_priv->rps.work);
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ cherryview_disable_rps(dev);
+ else if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
@@ -4712,7 +5020,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev)) {
+ cherryview_enable_rps(dev);
+ } else if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
@@ -4737,7 +5047,7 @@ void intel_enable_gt_powersave(struct drm_device *dev)
ironlake_enable_rc6(dev);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
- } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+ } else if (INTEL_INFO(dev)->gen >= 6) {
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
@@ -5110,7 +5420,7 @@ static void gen8_init_clock_gating(struct drm_device *dev)
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
I915_WRITE(_3D_CHICKEN3,
- _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
I915_WRITE(COMMON_SLICE_CHICKEN2,
_MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
@@ -5345,10 +5655,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
- dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
- dev_priv->vlv_cdclk_freq);
-
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
/* WaDisableEarlyCull:vlv */
@@ -5423,6 +5729,35 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
static void cherryview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ switch ((val >> 2) & 0x7) {
+ case 0:
+ case 1:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
+ dev_priv->mem_freq = 1600;
+ break;
+ case 2:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
+ dev_priv->mem_freq = 1600;
+ break;
+ case 3:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
+ dev_priv->mem_freq = 2000;
+ break;
+ case 4:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
+ dev_priv->mem_freq = 1600;
+ break;
+ case 5:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
+ dev_priv->mem_freq = 1600;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
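The CCK fuse decode above maps a 3-bit field to the CZ clock mode and the reported DDR speed. For example (values taken from the switch above; the sample fuse value is illustrative):

/* fuse bits [4:2] select the CZ clock mode, e.g.:
 *   val = 0x0c -> (0x0c >> 2) & 0x7 = 3
 *              -> CHV_CZ_CLOCK_FREQ_MODE_333, mem_freq = 2000 MHz */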
@@ -5663,7 +5998,6 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- unsigned long irqflags;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -5679,21 +6013,8 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
- if (IS_BROADWELL(dev)) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
- dev_priv->de_irq_mask[PIPE_B]);
- I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
- ~dev_priv->de_irq_mask[PIPE_B] |
- GEN8_PIPE_VBLANK);
- I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
- dev_priv->de_irq_mask[PIPE_C]);
- I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
- ~dev_priv->de_irq_mask[PIPE_C] |
- GEN8_PIPE_VBLANK);
- POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- }
+ if (IS_BROADWELL(dev))
+ gen8_irq_power_well_post_enable(dev_priv);
}
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -5764,34 +6085,13 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
return true;
}
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
- enum punit_power_well power_well_id, bool enable)
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
{
- struct drm_device *dev = dev_priv->dev;
+ enum punit_power_well power_well_id = power_well->data;
u32 mask;
u32 state;
u32 ctrl;
- enum pipe pipe;
-
- if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
- if (enable) {
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection.
- */
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV |
- DPLL_INTEGRATED_CRI_CLK_VLV);
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- } else {
- for_each_pipe(pipe)
- assert_pll_disabled(dev_priv, pipe);
- /* Assert common reset */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
- ~DPIO_CMNRST);
- }
- }
mask = PUNIT_PWRGT_MASK(power_well_id);
state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
@@ -5819,28 +6119,6 @@ void __vlv_set_power_well(struct drm_i915_private *dev_priv,
out:
mutex_unlock(&dev_priv->rps.hw_lock);
-
- /*
- * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
- * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
- * a. GUnit 0x2110 bit[0] set to 1 (def 0)
- * b. The other bits such as sfr settings / modesel may all
- * be set to 0.
- *
- * This should only be done on init and resume from S3 with
- * both PLLs disabled, or we risk losing DPIO and PLL
- * synchronization.
- */
- if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- enum punit_power_well power_well_id = power_well->data;
-
- __vlv_set_power_well(dev_priv, power_well_id, enable);
}
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -5932,6 +6210,53 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, false);
}
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection.
+ */
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum pipe pipe;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ for_each_pipe(pipe)
+ assert_pll_disabled(dev_priv, pipe);
+
+ /* Assert common reset */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
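Splitting the common-lane well into dedicated hooks pins down the ordering that the old __vlv_set_power_well() special case enforced implicitly. In summary (derived from the two functions above, not new code):

/* enable:  ungate CRI clock -> vlv_set_power_well(true)  -> de-assert DPIO_CMNRST
 * disable: assert PLLs off  -> assert DPIO_CMNRST        -> vlv_set_power_well(false)
 */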
static void check_power_well_state(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -6081,6 +6406,7 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
@@ -6180,6 +6506,13 @@ static const struct i915_power_well_ops vlv_display_power_well_ops = {
.is_enabled = vlv_power_well_enabled,
};
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_dpio_cmn_power_well_enable,
+ .disable = vlv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = vlv_power_well_enable,
@@ -6240,10 +6573,25 @@ static struct i915_power_well vlv_power_wells[] = {
.name = "dpio-common",
.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
- .ops = &vlv_dpio_power_well_ops,
+ .ops = &vlv_dpio_cmn_power_well_ops,
},
};
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+ enum punit_power_well power_well_id)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ if (power_well->data == power_well_id)
+ return power_well;
+ }
+
+ return NULL;
+}
+
#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
@@ -6294,11 +6642,50 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+ struct i915_power_well *disp2d =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+ /* nothing to do if common lane is already off */
+ if (!cmn->ops->is_enabled(dev_priv, cmn))
+ return;
+
+ /* If the display might already be active, skip this */
+ if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
+ I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ return;
+
+ DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+ /* cmnlane needs DPLL registers */
+ disp2d->ops->enable(dev_priv, disp2d);
+
+ /*
+ * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+ * Need to assert and de-assert PHY SB reset by gating the
+ * common lane power, then un-gating it.
+ * Simply ungating isn't enough to reset the PHY enough to get
+ * ports and lanes running.
+ */
+ cmn->ops->disable(dev_priv, cmn);
+}
+
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
power_domains->initializing = true;
+
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ mutex_lock(&power_domains->lock);
+ vlv_cmnlane_wa(dev_priv);
+ mutex_unlock(&power_domains->lock);
+ }
+
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev_priv, true);
intel_power_domains_resume(dev_priv);
@@ -6471,7 +6858,7 @@ void intel_init_pm(struct drm_device *dev)
(dev_priv->is_ddr3 == 1) ? "3" : "2",
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
- pineview_disable_cxsr(dev);
+ intel_set_memory_cxsr(dev_priv, false);
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
@@ -6554,7 +6941,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
return 0;
}
-int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div;
@@ -6576,7 +6963,7 @@ int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}
-int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul;
@@ -6598,6 +6985,80 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
+static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+ int div, freq;
+
+ switch (dev_priv->rps.cz_freq) {
+ case 200:
+ div = 5;
+ break;
+ case 267:
+ div = 6;
+ break;
+ case 320:
+ case 333:
+ case 400:
+ div = 8;
+ break;
+ default:
+ return -1;
+ }
+
+ freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
+
+ return freq;
+}
+
+static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+ int mul, opcode;
+
+ switch (dev_priv->rps.cz_freq) {
+ case 200:
+ mul = 5;
+ break;
+ case 267:
+ mul = 6;
+ break;
+ case 320:
+ case 333:
+ case 400:
+ mul = 8;
+ break;
+ default:
+ return -1;
+ }
+
+ opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
+
+ return opcode;
+}
+
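A worked example of the CHV conversion, assuming a 320 MHz CZ clock (div = mul = 8): opcode 52 yields DIV_ROUND_CLOSEST(320 * 52, 2 * 8) / 2 = 1040 / 2 = 520 MHz, and converting 520 MHz back gives DIV_ROUND_CLOSEST(520 * 2 * 8, 320) * 2 = 26 * 2 = 52, so the two helpers round-trip. As a sketch (the opcode value is illustrative):

int freq = chv_gpu_freq(dev_priv, 52);        /* -> 520 MHz */
int opcode = chv_freq_opcode(dev_priv, freq); /* -> 52      */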
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+ int ret = -1;
+
+ if (IS_CHERRYVIEW(dev_priv->dev))
+ ret = chv_gpu_freq(dev_priv, val);
+ else if (IS_VALLEYVIEW(dev_priv->dev))
+ ret = byt_gpu_freq(dev_priv, val);
+
+ return ret;
+}
+
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+ int ret = -1;
+
+ if (IS_CHERRYVIEW(dev_priv->dev))
+ ret = chv_freq_opcode(dev_priv, val);
+ else if (IS_VALLEYVIEW(dev_priv->dev))
+ ret = byt_freq_opcode(dev_priv, val);
+
+ return ret;
+}
+
void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6608,5 +7069,5 @@ void intel_pm_setup(struct drm_device *dev)
intel_gen6_powersave_work);
dev_priv->pm.suspended = false;
- dev_priv->pm.irqs_disabled = false;
+ dev_priv->pm._irqs_disabled = false;
}
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index a5e783a9928a..fd4f66231d30 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -28,7 +28,6 @@
struct intel_renderstate_rodata {
const u32 *reloc;
- const u32 reloc_items;
const u32 *batch;
const u32 batch_items;
};
@@ -40,7 +39,6 @@ extern const struct intel_renderstate_rodata gen8_null_state;
#define RO_RENDERSTATE(_g) \
const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
.reloc = gen ## _g ## _null_state_relocs, \
- .reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
.batch = gen ## _g ## _null_state_batch, \
.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
}
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
index 740538ad0977..56c1429d8a60 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -6,6 +6,7 @@ static const u32 gen6_null_state_relocs[] = {
0x0000002c,
0x000001e0,
0x000001e4,
+ -1,
};
static const u32 gen6_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
index 6fa7ff2a1298..419e35a7b0ff 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -5,6 +5,7 @@ static const u32 gen7_null_state_relocs[] = {
0x00000010,
0x00000018,
0x000001ec,
+ -1,
};
static const u32 gen7_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
index 5c875615d42a..75ef1b5de45c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -5,6 +5,7 @@ static const u32 gen8_null_state_relocs[] = {
0x00000050,
0x00000060,
0x000003ec,
+ -1,
};
static const u32 gen8_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 279488addf3f..b3d8f766fa7f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,9 +48,8 @@ static inline int __ring_space(int head, int tail, int size)
return space;
}
-static inline int ring_space(struct intel_engine_cs *ring)
+static inline int ring_space(struct intel_ringbuffer *ringbuf)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
}
@@ -545,7 +544,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
else {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
ringbuf->last_retired_head = -1;
}
@@ -604,6 +603,8 @@ static int init_render_ring(struct intel_engine_cs *ring)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);
+ if (ret)
+ return ret;
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
@@ -658,6 +659,13 @@ static int init_render_ring(struct intel_engine_cs *ring)
static void render_ring_cleanup(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->semaphore_obj) {
+ i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
+ drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
+ dev_priv->semaphore_obj = NULL;
+ }
if (ring->scratch.obj == NULL)
return;
@@ -671,29 +679,96 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
ring->scratch.obj = NULL;
}
+static int gen8_rcs_signal(struct intel_engine_cs *signaller,
+ unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 8
+ struct drm_device *dev = signaller->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *waiter;
+ int i, ret, num_rings;
+
+ num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+ ret = intel_ring_begin(signaller, num_dwords);
+ if (ret)
+ return ret;
+
+ for_each_ring(waiter, dev_priv, i) {
+ u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+ if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+ continue;
+
+ intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+ intel_ring_emit(signaller, lower_32_bits(gtt_offset));
+ intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+ intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, 0);
+ intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->id));
+ intel_ring_emit(signaller, 0);
+ }
+
+ return 0;
+}
+
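The dword accounting in gen8_rcs_signal() is straightforward to check: each mailbox update costs exactly the eight intel_ring_emit() calls in the loop body, and the signaller's own slot is MI_SEMAPHORE_SYNC_INVALID so it is skipped. On a hypothetical fully populated gen8 part:

/* hweight32(ring_mask) = 5, so a request asking for 4 dwords of its
 * own reserves 4 + (5 - 1) * 8 = 36 dwords in total */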
+static int gen8_xcs_signal(struct intel_engine_cs *signaller,
+ unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 6
+ struct drm_device *dev = signaller->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *waiter;
+ int i, ret, num_rings;
+
+ num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+ ret = intel_ring_begin(signaller, num_dwords);
+ if (ret)
+ return ret;
+
+ for_each_ring(waiter, dev_priv, i) {
+ u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+ if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+ continue;
+
+ intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
+ MI_FLUSH_DW_OP_STOREDW);
+ intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
+ MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+ intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->id));
+ intel_ring_emit(signaller, 0);
+ }
+
+ return 0;
+}
+
static int gen6_signal(struct intel_engine_cs *signaller,
unsigned int num_dwords)
{
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *useless;
- int i, ret;
+ int i, ret, num_rings;
- /* NB: In order to be able to do semaphore MBOX updates for varying
- * number of rings, it's easiest if we round up each individual update
- * to a multiple of 2 (since ring updates must always be a multiple of
- * 2) even though the actual update only requires 3 dwords.
- */
-#define MBOX_UPDATE_DWORDS 4
- if (i915_semaphore_is_enabled(dev))
- num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
- else
- return intel_ring_begin(signaller, num_dwords);
+#define MBOX_UPDATE_DWORDS 3
+ num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
+#undef MBOX_UPDATE_DWORDS
ret = intel_ring_begin(signaller, num_dwords);
if (ret)
return ret;
-#undef MBOX_UPDATE_DWORDS
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
@@ -701,15 +776,13 @@ static int gen6_signal(struct intel_engine_cs *signaller,
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
- intel_ring_emit(signaller, MI_NOOP);
- } else {
- intel_ring_emit(signaller, MI_NOOP);
- intel_ring_emit(signaller, MI_NOOP);
- intel_ring_emit(signaller, MI_NOOP);
- intel_ring_emit(signaller, MI_NOOP);
}
}
+ /* If num_dwords was rounded, make sure the tail pointer is correct */
+ if (num_rings % 2 == 0)
+ intel_ring_emit(signaller, MI_NOOP);
+
return 0;
}
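The rework above reserves only the 3 dwords each MI_LOAD_REGISTER_IMM update actually needs and rounds the total up to an even count, since ring emission must stay qword aligned. The parity works out as follows (a sketch of the round_up() plus the trailing MI_NOOP):

/* num_rings = 5: (5 - 1) * 3 = 12 dwords, already even, no pad
 * num_rings = 4: (4 - 1) * 3 =  9 dwords, rounded up to 10, one MI_NOOP pad */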
@@ -727,7 +800,11 @@ gen6_add_request(struct intel_engine_cs *ring)
{
int ret;
- ret = ring->semaphore.signal(ring, 4);
+ if (ring->semaphore.signal)
+ ret = ring->semaphore.signal(ring, 4);
+ else
+ ret = intel_ring_begin(ring, 4);
+
if (ret)
return ret;
@@ -754,6 +831,32 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
* @signaller - ring which has, or will signal
* @seqno - seqno which the waiter will block on
*/
+
+static int
+gen8_ring_sync(struct intel_engine_cs *waiter,
+ struct intel_engine_cs *signaller,
+ u32 seqno)
+{
+ struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+ int ret;
+
+ ret = intel_ring_begin(waiter, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD);
+ intel_ring_emit(waiter, seqno);
+ intel_ring_emit(waiter,
+ lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+ intel_ring_emit(waiter,
+ upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+ intel_ring_advance(waiter);
+ return 0;
+}
+
static int
gen6_ring_sync(struct intel_engine_cs *waiter,
struct intel_engine_cs *signaller,
@@ -901,7 +1004,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0)
- ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
@@ -916,7 +1019,7 @@ gen5_ring_put_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0)
- ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1109,7 +1212,7 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
GT_PARITY_ERROR(dev)));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1129,7 +1232,7 @@ gen6_ring_put_irq(struct intel_engine_cs *ring)
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
else
I915_WRITE_IMR(ring, ~0);
- ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1147,7 +1250,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+ gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1167,7 +1270,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
- snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+ gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1329,6 +1432,7 @@ static int init_status_page(struct intel_engine_cs *ring)
struct drm_i915_gem_object *obj;
if ((obj = ring->status_page.obj) == NULL) {
+ unsigned flags;
int ret;
obj = i915_gem_alloc_object(ring->dev, 4096);
@@ -1341,7 +1445,20 @@ static int init_status_page(struct intel_engine_cs *ring)
if (ret)
goto err_unref;
- ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+ flags = 0;
+ if (!HAS_LLC(ring->dev))
+ /* On g33, we cannot place HWS above 256MiB, so
+ * restrict its pinning to the low mappable arena.
+ * Though this restriction is not documented for
+ * gen4, gen5, or byt, they also behave similarly
+ * and hang if the HWS is placed at the top of the
+ * GTT. To generalise, it appears that all !llc
+ * platforms have issues with us placing the HWS
+ * above the mappable region (even though we never
+ * actually map it).
+ */
+ flags |= PIN_MAPPABLE;
+ ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
if (ret) {
err_unref:
drm_gem_object_unreference(&obj->base);
@@ -1378,15 +1495,25 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
return 0;
}
-static int allocate_ring_buffer(struct intel_engine_cs *ring)
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
+ if (!ringbuf->obj)
+ return;
+
+ iounmap(ringbuf->virtual_start);
+ i915_gem_object_ggtt_unpin(ringbuf->obj);
+ drm_gem_object_unreference(&ringbuf->obj->base);
+ ringbuf->obj = NULL;
+}
+
+static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+ struct intel_ringbuffer *ringbuf)
{
- struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_object *obj;
int ret;
- if (intel_ring_initialized(ring))
+ if (ringbuf->obj)
return 0;
obj = NULL;
@@ -1397,6 +1524,9 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
if (obj == NULL)
return -ENOMEM;
+ /* mark ring buffers as read-only from GPU side by default */
+ obj->gt_ro = 1;
+
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret)
goto err_unref;
@@ -1455,7 +1585,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- ret = allocate_ring_buffer(ring);
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
goto error;
@@ -1496,11 +1626,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
- iounmap(ringbuf->virtual_start);
-
- i915_gem_object_ggtt_unpin(ringbuf->obj);
- drm_gem_object_unreference(&ringbuf->obj->base);
- ringbuf->obj = NULL;
+ intel_destroy_ringbuffer_obj(ringbuf);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
@@ -1526,7 +1652,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
if (ringbuf->space >= n)
return 0;
}
@@ -1549,7 +1675,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
return 0;
}
@@ -1578,7 +1704,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
trace_i915_ring_wait_begin(ring);
do {
ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
if (ringbuf->space >= n) {
ret = 0;
break;
@@ -1630,7 +1756,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
return 0;
}
@@ -1746,14 +1872,15 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring)
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
BUG_ON(ring->outstanding_lazy_seqno);
- if (INTEL_INFO(ring->dev)->gen >= 6) {
+ if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
- if (HAS_VEBOX(ring->dev))
+ if (HAS_VEBOX(dev))
I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
}
@@ -1941,45 +2068,74 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct drm_i915_gem_object *obj;
+ int ret;
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_INFO(dev)->gen >= 8) {
+ if (i915_semaphore_is_enabled(dev)) {
+ obj = i915_gem_alloc_object(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+ i915.semaphores = 0;
+ } else {
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+ DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
+ i915.semaphores = 0;
+ } else
+ dev_priv->semaphore_obj = obj;
+ }
+ }
+ ring->add_request = gen6_add_request;
+ ring->flush = gen8_render_ring_flush;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ if (i915_semaphore_is_enabled(dev)) {
+ WARN_ON(!dev_priv->semaphore_obj);
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_rcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
+ } else if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
ring->flush = gen7_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
ring->flush = gen6_render_ring_flush;
- if (INTEL_INFO(dev)->gen >= 8) {
- ring->flush = gen8_render_ring_flush;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- } else {
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- }
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8 platform.
- * And there is no VCS2 ring on the pre-gen8 platform. So the
- * semaphore between RCS and VCS2 is initialized as INVALID.
- * Gen8 will initialize the sema between VCS2 and RCS later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ /*
+ * The current semaphore is only applied on pre-gen8
+ * platform. And there is no VCS2 ring on the pre-gen8
+ * platform. So the semaphore between RCS and VCS2 is
+ * initialized as INVALID. Gen8 will initialize the
+ * sema between VCS2 and RCS later.
+ */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
@@ -2007,6 +2163,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_enable_mask = I915_USER_INTERRUPT;
}
ring->write_tail = ring_write_tail;
+
if (IS_HASWELL(dev))
ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
else if (IS_GEN8(dev))
@@ -2024,9 +2181,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
/* Workaround batchbuffer to combat CS tlb bug. */
if (HAS_BROKEN_CS_TLB(dev)) {
- struct drm_i915_gem_object *obj;
- int ret;
-
obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
if (obj == NULL) {
DRM_ERROR("Failed to allocate batch bo\n");
@@ -2157,31 +2311,32 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
} else {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
}
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8 platform.
- * And there is no VCS2 ring on the pre-gen8 platform. So the
- * semaphore between VCS and VCS2 is initialized as INVALID.
- * Gen8 will initialize the sema between VCS2 and VCS later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
} else {
ring->mmio_base = BSD_RING_BASE;
ring->flush = bsd_ring_flush;
@@ -2218,7 +2373,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
return -EINVAL;
}
- ring->name = "bds2_ring";
+ ring->name = "bsd2 ring";
ring->id = VCS2;
ring->write_tail = ring_write_tail;
@@ -2233,25 +2388,11 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on the pre-gen8. And there
- * is no bsd2 ring on the pre-gen8. So now the semaphore_register
- * between VCS2 and other ring is initialized as invalid.
- * Gen8 will initialize the sema between VCS2 and other ring later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
@@ -2277,30 +2418,38 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
} else {
ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.sync_to = gen6_ring_sync;
+ /*
+ * The current semaphore is only applied on pre-gen8
+ * platform. And there is no VCS2 ring on the pre-gen8
+ * platform. So the semaphore between BCS and VCS2 is
+ * initialized as INVALID. Gen8 will initialize the
+ * sema between BCS and VCS2 later.
+ */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
}
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8 platform. And
- * there is no VCS2 ring on the pre-gen8 platform. So the semaphore
- * between BCS and VCS2 is initialized as INVALID.
- * Gen8 will initialize the sema between BCS and VCS2 later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
@@ -2327,24 +2476,31 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
} else {
ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
ring->irq_get = hsw_vebox_get_irq;
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
}
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e72017bdcd7f..ed5941078f92 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -40,6 +40,32 @@ struct intel_hw_status_page {
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
+/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
+ * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
+ */
+#define i915_semaphore_seqno_size sizeof(uint64_t)
+#define GEN8_SIGNAL_OFFSET(__ring, to) \
+ (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+ ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+ (i915_semaphore_seqno_size * (to)))
+
+#define GEN8_WAIT_OFFSET(__ring, from) \
+ (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+ ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+ (i915_semaphore_seqno_size * (__ring)->id))
+
+#define GEN8_RING_SEMAPHORE_INIT do { \
+ if (!dev_priv->semaphore_obj) { \
+ break; \
+ } \
+ ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
+ ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
+ ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
+ ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
+ ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
+ ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
+ } while (0)
+
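These macros carve the 4 KiB semaphore bo into an I915_NUM_RINGS x I915_NUM_RINGS grid of qword slots, f(x, y) = x * I915_NUM_RINGS * 8 + y * 8. For example, with the usual ids (RCS = 0, VCS = 1, BCS = 2), VCS signalling BCS and BCS waiting on VCS both resolve to the same slot, matching the 0x38 entry in the table further down:

/* GEN8_SIGNAL_OFFSET(vcs, BCS) = base + 1 * 5 * 8 + 2 * 8 = base + 0x38
 * GEN8_WAIT_OFFSET(bcs, VCS)   = base + 1 * 5 * 8 + 2 * 8 = base + 0x38 */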
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
@@ -127,15 +153,55 @@ struct intel_engine_cs {
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_engine_cs *ring);
+ /* GEN8 signal/wait table - never trust comments!
+ * signal to signal to signal to signal to signal to
+ * RCS VCS BCS VECS VCS2
+ * --------------------------------------------------------------------
+ * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
+ * |-------------------------------------------------------------------
+ * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
+ * |-------------------------------------------------------------------
+ * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
+ * |-------------------------------------------------------------------
+ * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
+ * |-------------------------------------------------------------------
+ * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
+ * |-------------------------------------------------------------------
+ *
+ * Generalization:
+ * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
+ * i.e. the transpose of g(x, y)
+ *
+ * sync from sync from sync from sync from sync from
+ * RCS VCS BCS VECS VCS2
+ * --------------------------------------------------------------------
+ * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
+ * |-------------------------------------------------------------------
+ * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
+ * |-------------------------------------------------------------------
+ * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
+ * |-------------------------------------------------------------------
+ * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
+ * |-------------------------------------------------------------------
+ * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
+ * |-------------------------------------------------------------------
+ *
+ * Generalization:
+ * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
+ * i.e. the transpose of f(x, y)
+ */
struct {
u32 sync_seqno[I915_NUM_RINGS-1];
- struct {
- /* our mbox written by others */
- u32 wait[I915_NUM_RINGS];
- /* mboxes this ring signals to */
- u32 signal[I915_NUM_RINGS];
- } mbox;
+ union {
+ struct {
+ /* our mbox written by others */
+ u32 wait[I915_NUM_RINGS];
+ /* mboxes this ring signals to */
+ u32 signal[I915_NUM_RINGS];
+ } mbox;
+ u64 signal_ggtt[I915_NUM_RINGS];
+ };
/* AKA wait() */
int (*sync_to)(struct intel_engine_cs *ring,
@@ -238,9 +304,11 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
int idx;
/*
- * cs -> 0 = vcs, 1 = bcs
- * vcs -> 0 = bcs, 1 = cs,
- * bcs -> 0 = cs, 1 = vcs.
+ * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
+ * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
+ * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
+ * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
+ * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/
idx = (other - ring) - 1;
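A quick check of the updated mapping: for waiter vecs (id 3) and signaller rcs (id 0), (0 - 3) - 1 = -4, which the wrap-around just below this hunk (context not shown in the diff) brings back to idx 1, in agreement with the "vecs -> ... 1 = rcs" row above:

/* idx = (0 - 3) - 1 = -4; -4 + I915_NUM_RINGS = 1 */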
@@ -318,9 +386,9 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
-static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
+static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
- return ring->buffer->tail;
+ return ringbuf->tail;
}
static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9a17b4e92ef4..168c6652cda1 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -218,7 +218,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
sprctl |= SP_ENABLE;
- intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
+ intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
+ pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
@@ -283,7 +284,7 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
- intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
+ intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
static int
@@ -406,7 +407,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
- intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
+ true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
@@ -486,7 +488,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
*/
intel_wait_for_vblank(dev, pipe);
- intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
}
static int
@@ -606,7 +608,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
- intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
+ pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
@@ -681,7 +684,7 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
*/
intel_wait_for_vblank(dev, pipe);
- intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
}
static void
@@ -819,6 +822,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
+ enum pipe pipe = intel_crtc->pipe;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_i915_gem_object *old_obj = intel_plane->obj;
@@ -1006,6 +1010,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
*/
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret)
@@ -1039,6 +1045,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
else
intel_plane->disable_plane(plane, crtc);
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
+
if (!primary_was_enabled && primary_enabled)
intel_post_enable_primary(crtc);
}
@@ -1068,6 +1076,7 @@ intel_disable_plane(struct drm_plane *plane)
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc;
+ enum pipe pipe;
if (!plane->fb)
return 0;
@@ -1076,6 +1085,7 @@ intel_disable_plane(struct drm_plane *plane)
return -EINVAL;
intel_crtc = to_intel_crtc(plane->crtc);
+ pipe = intel_crtc->pipe;
if (intel_crtc->active) {
bool primary_was_enabled = intel_crtc->primary_enabled;
@@ -1094,6 +1104,8 @@ intel_disable_plane(struct drm_plane *plane)
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(intel_plane->obj);
+ i915_gem_track_fb(intel_plane->obj, NULL,
+ INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
intel_plane->obj = NULL;
@@ -1114,7 +1126,6 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *set = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
@@ -1128,13 +1139,12 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, set->plane_id);
+ if (!plane) {
ret = -ENOENT;
goto out_unlock;
}
- plane = obj_to_plane(obj);
intel_plane = to_intel_plane(plane);
ret = intel_plane->update_colorkey(plane, set);
@@ -1147,7 +1157,6 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *get = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
@@ -1157,13 +1166,12 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, get->plane_id);
+ if (!plane) {
ret = -ENOENT;
goto out_unlock;
}
- plane = obj_to_plane(obj);
intel_plane = to_intel_plane(plane);
intel_plane->get_colorkey(plane, get);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f6fef7ac069..e81bc3bdc533 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -231,8 +231,8 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
}
/* WaRsForcewakeWaitTC0:vlv */
- __gen6_gt_wait_for_thread_c0(dev_priv);
-
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ __gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@@ -250,9 +250,10 @@ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ gen6_gt_check_fifodbg(dev_priv);
}
static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
@@ -315,7 +316,7 @@ static void gen6_force_wake_timer(unsigned long arg)
intel_runtime_pm_put(dev_priv);
}
-static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -357,16 +358,12 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
dev_priv->uncore.fifo_count =
__raw_i915_read32(dev_priv, GTFIFOCTL) &
GT_FIFO_FREE_ENTRIES_MASK;
- } else {
- dev_priv->uncore.forcewake_count = 0;
- dev_priv->uncore.fw_rendercount = 0;
- dev_priv->uncore.fw_mediacount = 0;
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void intel_uncore_early_sanitize(struct drm_device *dev)
+void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -389,7 +386,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
__raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG));
- intel_uncore_forcewake_reset(dev, false);
+ intel_uncore_forcewake_reset(dev, restore_forcewake);
}
void intel_uncore_sanitize(struct drm_device *dev)
@@ -469,16 +466,43 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
((reg) < 0x40000 && (reg) != FORCEWAKE)
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
- (((reg) >= 0x2000 && (reg) < 0x4000) ||\
- ((reg) >= 0x5000 && (reg) < 0x8000) ||\
- ((reg) >= 0xB000 && (reg) < 0x12000) ||\
- ((reg) >= 0x2E000 && (reg) < 0x30000))
+#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
- (((reg) >= 0x12000 && (reg) < 0x14000) ||\
- ((reg) >= 0x22000 && (reg) < 0x24000) ||\
- ((reg) >= 0x30000 && (reg) < 0x40000))
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x2000, 0x4000) || \
+ REG_RANGE((reg), 0x5000, 0x8000) || \
+ REG_RANGE((reg), 0xB000, 0x12000) || \
+ REG_RANGE((reg), 0x2E000, 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x12000, 0x14000) || \
+ REG_RANGE((reg), 0x22000, 0x24000) || \
+ REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x2000, 0x4000) || \
+ REG_RANGE((reg), 0x5000, 0x8000) || \
+ REG_RANGE((reg), 0x8300, 0x8500) || \
+ REG_RANGE((reg), 0xB000, 0xC000) || \
+ REG_RANGE((reg), 0xE000, 0xE800))
+
+#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x8800, 0x8900) || \
+ REG_RANGE((reg), 0xD000, 0xD800) || \
+ REG_RANGE((reg), 0x12000, 0x14000) || \
+ REG_RANGE((reg), 0x1A000, 0x1C000) || \
+ REG_RANGE((reg), 0x1E800, 0x1EA00) || \
+ REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x4000, 0x5000) || \
+ REG_RANGE((reg), 0x8000, 0x8300) || \
+ REG_RANGE((reg), 0x8500, 0x8600) || \
+ REG_RANGE((reg), 0x9000, 0xB000) || \
+ REG_RANGE((reg), 0xC000, 0xC800) || \
+ REG_RANGE((reg), 0xF000, 0x10000) || \
+ REG_RANGE((reg), 0x14000, 0x14400) || \
+ REG_RANGE((reg), 0x22000, 0x24000))
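These REG_RANGE tables decide which forcewake wells an accessor must hold for a given MMIO offset. A minimal sketch of the classification, using only a subset of the CHV ranges above; the FW_RENDER/FW_MEDIA flags and the chv_domains_for helper are illustrative names, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FW_RENDER (1 << 0)   /* illustrative flags, not the driver's */
#define FW_MEDIA  (1 << 1)

static unsigned chv_domains_for(uint32_t reg)
{
	if (REG_RANGE(reg, 0x2000, 0x4000) || REG_RANGE(reg, 0x5000, 0x8000))
		return FW_RENDER;            /* render-only ranges */
	if (REG_RANGE(reg, 0x12000, 0x14000))
		return FW_MEDIA;             /* media-only range */
	if (REG_RANGE(reg, 0x4000, 0x5000) || REG_RANGE(reg, 0x9000, 0xB000))
		return FW_RENDER | FW_MEDIA; /* common ranges: wake both */
	return 0;                            /* no forcewake needed */
}

int main(void)
{
	printf("0x4800 -> %u\n", chv_domains_for(0x4800)); /* 3: both wells */
	return 0;
}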
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
@@ -490,20 +514,30 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
}
static void
-hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
+ bool before)
{
+ const char *op = read ? "reading" : "writing to";
+ const char *when = before ? "before" : "after";
+
+ if (!i915.mmio_debug)
+ return;
+
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_ERROR("Unknown unclaimed register before writing to %x\n",
- reg);
+ WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
+ when, op, reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
static void
-hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
+ if (i915.mmio_debug)
+ return;
+
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_ERROR("Unclaimed write to %x\n", reg);
+ DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
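The split above gives two modes: with i915.mmio_debug set, every access is bracketed by before/after checks that name the exact register; with it clear, a single cheap check after writes reports once and points at the parameter. A standalone sketch of that gating, assuming a module-parameter-like flag and a stubbed unclaimed bit:

#include <stdio.h>

static int mmio_debug;                /* stand-in for i915.mmio_debug */

static int unclaimed_bit_set(void)
{
	return 1;                     /* pretend FPGA_DBG flagged an access */
}

static void reg_debug(const char *when, const char *op, unsigned reg)
{
	if (!mmio_debug)
		return;               /* expensive per-access path, opt-in */
	if (unclaimed_bit_set())
		printf("Unclaimed register detected %s %s register 0x%x\n",
		       when, op, reg);
}

static void reg_detect(void)
{
	if (mmio_debug)
		return;               /* the debug path already reported it */
	if (unclaimed_bit_set())
		printf("Unclaimed register detected. "
		       "Please use i915.mmio_debug=1 to debug this problem.\n");
}

int main(void)
{
	reg_debug("before", "writing to", 0x2030); /* silent: debug is off */
	reg_detect();                              /* prints the one-shot hint */
	return 0;
}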
@@ -540,6 +574,7 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
+ hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (dev_priv->uncore.forcewake_count == 0 && \
NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -550,6 +585,7 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
+ hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
REG_READ_FOOTER; \
}
@@ -573,7 +609,35 @@ vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_FOOTER; \
}
+#define __chv_read(x) \
+static u##x \
+chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ unsigned fwengine = 0; \
+ REG_READ_HEADER(x); \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+ REG_READ_FOOTER; \
+}
+__chv_read(8)
+__chv_read(16)
+__chv_read(32)
+__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
@@ -591,6 +655,7 @@ __gen4_read(16)
__gen4_read(32)
__gen4_read(64)
+#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
@@ -647,12 +712,13 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- hsw_unclaimed_reg_clear(dev_priv, reg); \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
- hsw_unclaimed_reg_check(dev_priv, reg); \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+ hsw_unclaimed_reg_detect(dev_priv); \
REG_WRITE_FOOTER; \
}
@@ -681,6 +747,7 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
REG_WRITE_HEADER; \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -692,9 +759,43 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
} else { \
__raw_i915_write##x(dev_priv, reg, val); \
} \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+ hsw_unclaimed_reg_detect(dev_priv); \
REG_WRITE_FOOTER; \
}
+#define __chv_write(x) \
+static void \
+chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ unsigned fwengine = 0; \
+ bool shadowed = is_gen8_shadowed(dev_priv, reg); \
+ REG_WRITE_HEADER; \
+ if (!shadowed) { \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+ REG_WRITE_FOOTER; \
+}
+
+__chv_write(8)
+__chv_write(16)
+__chv_write(32)
+__chv_write(64)
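The chv accessors above wake a well only when its reference count is zero, so nested users piggyback on an already-held forcewake instead of touching the hardware again. A reduced model of that pattern; struct uncore_model, fw_get and fw_put are hypothetical stand-ins for the uncore state and the force_wake_get/put hooks:

#include <stdio.h>

#define FW_RENDER (1 << 0)
#define FW_MEDIA  (1 << 1)

struct uncore_model {                  /* hypothetical, reduced uncore state */
	unsigned fw_rendercount;
	unsigned fw_mediacount;
};

static void fw_get(unsigned engine) { printf("get 0x%x\n", engine); }
static void fw_put(unsigned engine) { printf("put 0x%x\n", engine); }

static void model_write32(struct uncore_model *u, int need_render, int need_media)
{
	unsigned fwengine = 0;

	/* only wake a well that nobody else is currently holding awake */
	if (need_render && u->fw_rendercount == 0)
		fwengine |= FW_RENDER;
	if (need_media && u->fw_mediacount == 0)
		fwengine |= FW_MEDIA;

	if (fwengine)
		fw_get(fwengine);
	/* ... the raw register write itself goes here ... */
	if (fwengine)
		fw_put(fwengine);
}

int main(void)
{
	struct uncore_model u = { 0, 1 };  /* media well already held */
	model_write32(&u, 1, 1);           /* grabs and drops render only */
	return 0;
}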
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
@@ -716,6 +817,7 @@ __gen4_write(16)
__gen4_write(32)
__gen4_write(64)
+#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
@@ -731,7 +833,7 @@ void intel_uncore_init(struct drm_device *dev)
setup_timer(&dev_priv->uncore.force_wake_timer,
gen6_force_wake_timer, (unsigned long)dev_priv);
- intel_uncore_early_sanitize(dev);
+ intel_uncore_early_sanitize(dev, false);
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
@@ -779,14 +881,26 @@ void intel_uncore_init(struct drm_device *dev)
switch (INTEL_INFO(dev)->gen) {
default:
- dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
- dev_priv->uncore.funcs.mmio_writew = gen8_write16;
- dev_priv->uncore.funcs.mmio_writel = gen8_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
- dev_priv->uncore.funcs.mmio_readb = gen6_read8;
- dev_priv->uncore.funcs.mmio_readw = gen6_read16;
- dev_priv->uncore.funcs.mmio_readl = gen6_read32;
- dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ if (IS_CHERRYVIEW(dev)) {
+ dev_priv->uncore.funcs.mmio_writeb = chv_write8;
+ dev_priv->uncore.funcs.mmio_writew = chv_write16;
+ dev_priv->uncore.funcs.mmio_writel = chv_write32;
+ dev_priv->uncore.funcs.mmio_writeq = chv_write64;
+ dev_priv->uncore.funcs.mmio_readb = chv_read8;
+ dev_priv->uncore.funcs.mmio_readw = chv_read16;
+ dev_priv->uncore.funcs.mmio_readl = chv_read32;
+ dev_priv->uncore.funcs.mmio_readq = chv_read64;
+
+ } else {
+ dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen8_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen8_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
+ dev_priv->uncore.funcs.mmio_readb = gen6_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen6_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen6_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ }
break;
case 7:
case 6:
@@ -912,7 +1026,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
if (args->flags || args->pad)
return -EINVAL;
- if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
+ if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1053,18 +1167,16 @@ static int gen6_do_reset(struct drm_device *dev)
int intel_gpu_reset(struct drm_device *dev)
{
- switch (INTEL_INFO(dev)->gen) {
- case 8:
- case 7:
- case 6: return gen6_do_reset(dev);
- case 5: return ironlake_do_reset(dev);
- case 4:
- if (IS_G4X(dev))
- return g4x_do_reset(dev);
- else
- return i965_do_reset(dev);
- default: return -ENODEV;
- }
+ if (INTEL_INFO(dev)->gen >= 6)
+ return gen6_do_reset(dev);
+ else if (IS_GEN5(dev))
+ return ironlake_do_reset(dev);
+ else if (IS_G4X(dev))
+ return g4x_do_reset(dev);
+ else if (IS_GEN4(dev))
+ return i965_do_reset(dev);
+ else
+ return -ENODEV;
}
void intel_uncore_check_errors(struct drm_device *dev)
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index cf11ee68a6d9..80de23d9b9c9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -280,7 +280,7 @@ static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
{
int ret;
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index f9fe390920f1..45f04dea0ac2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1562,19 +1562,9 @@ static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
*connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj =
- drm_mode_object_find(connector->dev, enc_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index f12388967856..c99c50de3226 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,7 +2,6 @@
config DRM_MSM
tristate "MSM DRM"
depends on DRM
- depends on MSM_IOMMU
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
select DRM_KMS_HELPER
select SHMEM
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 85d615e7d62f..a8a144b38eaa 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <[email protected]> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -203,6 +203,15 @@ enum a2xx_rb_copy_sample_select {
SAMPLE_0123 = 6,
};
+enum a2xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_MIN_DST_SRC = 2,
+ BLEND_MAX_DST_SRC = 3,
+ BLEND_DST_MINUS_SRC = 4,
+ BLEND_DST_PLUS_SRC_BIAS = 5,
+};
+
enum adreno_mmu_clnt_beh {
BEH_NEVR = 0,
BEH_TRAN_RNG = 1,
@@ -890,6 +899,39 @@ static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9
#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc
+#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
+#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
+#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
+#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
+#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
+#define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
+#define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+}
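All of these generated helpers follow one pattern: shift the value into the field's bit position, mask off any overflow, and OR the packed fields into the register word. Reduced to a single field as a sketch:

#include <stdint.h>
#include <stdio.h>

/* one field of the word above: NUM_INDICES lives in bits 31:16 */
#define NUM_INDICES__MASK  0xffff0000
#define NUM_INDICES__SHIFT 16

static inline uint32_t NUM_INDICES(uint32_t val)
{
	/* shift into position, then mask off anything past the field */
	return (val << NUM_INDICES__SHIFT) & NUM_INDICES__MASK;
}

int main(void)
{
	uint32_t di = NUM_INDICES(6) | 0x04;         /* 0x04: illustrative */
	printf("VGT_DRAW_INITIATOR = 0x%08x\n", di); /* 0x00060004 */
	return 0;
}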
#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
@@ -963,7 +1005,7 @@ static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend
}
#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
-static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum adreno_rb_blend_opcode val)
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum a2xx_rb_blend_opcode val)
{
return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
}
@@ -981,7 +1023,7 @@ static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend
}
#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
-static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum adreno_rb_blend_opcode val)
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum a2xx_rb_blend_opcode val)
{
return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
}
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index a7be56163d23..303e8a9e91a5 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <[email protected]> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -41,31 +41,11 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-enum a3xx_render_mode {
- RB_RENDERING_PASS = 0,
- RB_TILING_PASS = 1,
- RB_RESOLVE_PASS = 2,
-};
-
enum a3xx_tile_mode {
LINEAR = 0,
TILE_32X32 = 2,
};
-enum a3xx_threadmode {
- MULTI = 0,
- SINGLE = 1,
-};
-
-enum a3xx_instrbuffermode {
- BUFFER = 1,
-};
-
-enum a3xx_threadsize {
- TWO_QUADS = 0,
- FOUR_QUADS = 1,
-};
-
enum a3xx_state_block_id {
HLSQ_BLOCK_ID_TP_TEX = 2,
HLSQ_BLOCK_ID_TP_MIPMAP = 3,
@@ -169,6 +149,8 @@ enum a3xx_color_fmt {
RB_R8G8B8A8_UNORM = 8,
RB_Z16_UNORM = 12,
RB_A8_UNORM = 20,
+ RB_R16G16B16A16_FLOAT = 27,
+ RB_R32G32B32A32_FLOAT = 51,
};
enum a3xx_color_swap {
@@ -178,12 +160,6 @@ enum a3xx_color_swap {
XYZW = 3,
};
-enum a3xx_msaa_samples {
- MSAA_ONE = 0,
- MSAA_TWO = 1,
- MSAA_FOUR = 2,
-};
-
enum a3xx_sp_perfcounter_select {
SP_FS_CFLOW_INSTRUCTIONS = 12,
SP_FS_FULL_ALU_INSTRUCTIONS = 14,
@@ -191,21 +167,45 @@ enum a3xx_sp_perfcounter_select {
SP_ALU_ACTIVE_CYCLES = 29,
};
-enum adreno_rb_copy_control_mode {
- RB_COPY_RESOLVE = 1,
- RB_COPY_DEPTH_STENCIL = 5,
+enum a3xx_rop_code {
+ ROP_CLEAR = 0,
+ ROP_NOR = 1,
+ ROP_AND_INVERTED = 2,
+ ROP_COPY_INVERTED = 3,
+ ROP_AND_REVERSE = 4,
+ ROP_INVERT = 5,
+ ROP_XOR = 6,
+ ROP_NAND = 7,
+ ROP_AND = 8,
+ ROP_EQUIV = 9,
+ ROP_NOOP = 10,
+ ROP_OR_INVERTED = 11,
+ ROP_COPY = 12,
+ ROP_OR_REVERSE = 13,
+ ROP_OR = 14,
+ ROP_SET = 15,
+};
+
+enum a3xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
};
enum a3xx_tex_filter {
A3XX_TEX_NEAREST = 0,
A3XX_TEX_LINEAR = 1,
+ A3XX_TEX_ANISO = 2,
};
enum a3xx_tex_clamp {
A3XX_TEX_REPEAT = 0,
A3XX_TEX_CLAMP_TO_EDGE = 1,
A3XX_TEX_MIRROR_REPEAT = 2,
- A3XX_TEX_CLAMP_NONE = 3,
+ A3XX_TEX_CLAMP_TO_BORDER = 3,
+ A3XX_TEX_MIRROR_CLAMP = 4,
};
enum a3xx_tex_swiz {
@@ -316,6 +316,7 @@ enum a3xx_tex_type {
#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
+#define A3XX_RBBM_PERFCTR_CTL_ENABLE 0x00000001
#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
@@ -549,6 +550,10 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
#define REG_A3XX_CP_AHB_FAULT 0x0000054d
+#define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22
+
+#define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23
+
#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
@@ -556,6 +561,9 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZCOORD 0x00800000
+#define A3XX_GRAS_CL_CLIP_CNTL_WCOORD 0x01000000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE 0x02000000
#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
@@ -620,8 +628,26 @@ static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
}
#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
+#define A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
+#define A3XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
+}
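The * 8.0 in these helpers reads as an unsigned fixed-point encoding with 3 fractional bits, so a point size of 1.5 packs as 12; that interpretation is an assumption from the code, not something the header documents. A quick check:

#include <stdint.h>
#include <stdio.h>

static uint32_t point_size_fixed(float val)
{
	/* multiply by 8 = shift in 3 fractional bits (assumed encoding) */
	return ((uint32_t)(val * 8.0)) & 0xffff;
}

int main(void)
{
	printf("1.5  -> 0x%04x\n", point_size_fixed(1.5f));  /* 0x000c */
	printf("16.0 -> 0x%04x\n", point_size_fixed(16.0f)); /* 0x0080 */
	return 0;
}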
#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
@@ -743,6 +769,7 @@ static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode va
#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
+#define A3XX_RB_RENDER_CONTROL_FACENESS 0x00000008
#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
@@ -751,6 +778,10 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
}
#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
+#define A3XX_RB_RENDER_CONTROL_XCOORD 0x00004000
+#define A3XX_RB_RENDER_CONTROL_YCOORD 0x00008000
+#define A3XX_RB_RENDER_CONTROL_ZCOORD 0x00010000
+#define A3XX_RB_RENDER_CONTROL_WCOORD 0x00020000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
@@ -796,7 +827,7 @@ static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4
#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
-static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(uint32_t val)
+static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
{
return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
}
@@ -856,7 +887,7 @@ static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b
}
#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
}
@@ -874,7 +905,7 @@ static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb
}
#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
}
@@ -957,17 +988,24 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples
{
return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
}
+#define A3XX_RB_COPY_CONTROL_DEPTHCLEAR 0x00000008
#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
{
return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
}
-#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xfffffc00
-#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 10
+#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
+#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
+static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
{
- return ((val >> 10) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+ return ((val >> 14) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
}
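Dropping the low 14 bits before shifting implies the GMEM base must now be 16 KiB aligned (the old shift of 10 only required 1 KiB). A sketch of the encode, which is a no-op for aligned addresses and silently truncates unaligned ones:

#include <stdint.h>
#include <stdio.h>

#define GMEM_BASE__MASK  0xffffc000
#define GMEM_BASE__SHIFT 14

static uint32_t encode_gmem_base(uint32_t addr)
{
	/* >> 14 then << 14 keeps only 16 KiB aligned addresses intact */
	return ((addr >> 14) << GMEM_BASE__SHIFT) & GMEM_BASE__MASK;
}

int main(void)
{
	printf("0x8000 -> 0x%08x\n", encode_gmem_base(0x8000)); /* unchanged */
	printf("0x8100 -> 0x%08x\n", encode_gmem_base(0x8100)); /* 0x8000 */
	return 0;
}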
#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
@@ -1005,6 +1043,12 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
{
return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
}
+#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
@@ -1019,6 +1063,7 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endi
}
#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
+#define A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008
@@ -1044,7 +1089,7 @@ static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
- return ((val >> 10) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+ return ((val >> 12) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}
#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
@@ -1172,6 +1217,8 @@ static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
}
#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110
+#define A3XX_RB_SAMPLE_COUNT_CONTROL_RESET 0x00000001
+#define A3XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111
@@ -1179,7 +1226,23 @@ static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115
+#define REG_A3XX_VGT_BIN_BASE 0x000021e1
+
+#define REG_A3XX_VGT_BIN_SIZE 0x000021e2
+
#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
+#define A3XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A3XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A3XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A3XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A3XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A3XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A3XX_PC_VSTREAM_CONTROL_N__SHIFT) & A3XX_PC_VSTREAM_CONTROL_N__MASK;
+}
#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
@@ -1203,6 +1266,7 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
}
#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
+#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
@@ -1232,6 +1296,7 @@ static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize
}
#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
+#define A3XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000
#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
@@ -1242,6 +1307,12 @@ static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
}
#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
+#define A3XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
+#define A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+}
#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
@@ -1312,10 +1383,36 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
}
#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK 0x00000003
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK 0x00000ffc
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT 2
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK 0x003ff000
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT 12
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK 0xffc00000
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT 22
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK;
+}
+
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK(uint32_t i0) { return 0x0000220b + 0x2*i0; }
-#define REG_A3XX_HLSQ_CL_NDRANGE_1_REG 0x0000220b
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_SIZE(uint32_t i0) { return 0x0000220b + 0x2*i0; }
-#define REG_A3XX_HLSQ_CL_NDRANGE_2_REG 0x0000220c
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_OFFSET(uint32_t i0) { return 0x0000220c + 0x2*i0; }
#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
@@ -1323,7 +1420,9 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
-#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215
+static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP(uint32_t i0) { return 0x00002215 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP_RATIO(uint32_t i0) { return 0x00002215 + 0x1*i0; }
#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216
@@ -1438,6 +1537,12 @@ static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
{
return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
}
+#define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
+#define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
+static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A3XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
@@ -1462,12 +1567,13 @@ static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val
}
#define REG_A3XX_VPC_ATTR 0x00002280
-#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x00000fff
+#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
{
return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
}
+#define A3XX_VPC_ATTR_PSIZE 0x00000200
#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
@@ -1522,11 +1628,11 @@ static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
{
return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
}
-#define A3XX_SP_SP_CTRL_REG_LOMODE__MASK 0x00c00000
-#define A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT 22
-static inline uint32_t A3XX_SP_SP_CTRL_REG_LOMODE(uint32_t val)
+#define A3XX_SP_SP_CTRL_REG_L0MODE__MASK 0x00c00000
+#define A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT 22
+static inline uint32_t A3XX_SP_SP_CTRL_REG_L0MODE(uint32_t val)
{
- return ((val) << A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_LOMODE__MASK;
+ return ((val) << A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT) & A3XX_SP_SP_CTRL_REG_L0MODE__MASK;
}
#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
@@ -1569,6 +1675,7 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
}
#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_VS_CTRL_REG0_COMPUTEMODE 0x00800000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
@@ -1742,6 +1849,7 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
}
#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_FS_CTRL_REG0_COMPUTEMODE 0x00800000
#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
@@ -1802,6 +1910,13 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
+static inline uint32_t A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
+}
static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
@@ -1914,6 +2029,42 @@ static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
+#define REG_A3XX_VBIF_PERF_CNT_EN 0x00003070
+#define A3XX_VBIF_PERF_CNT_EN_CNT0 0x00000001
+#define A3XX_VBIF_PERF_CNT_EN_CNT1 0x00000002
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT0 0x00000004
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT1 0x00000008
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT2 0x00000010
+
+#define REG_A3XX_VBIF_PERF_CNT_CLR 0x00003071
+#define A3XX_VBIF_PERF_CNT_CLR_CNT0 0x00000001
+#define A3XX_VBIF_PERF_CNT_CLR_CNT1 0x00000002
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT0 0x00000004
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT1 0x00000008
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT2 0x00000010
+
+#define REG_A3XX_VBIF_PERF_CNT_SEL 0x00003072
+
+#define REG_A3XX_VBIF_PERF_CNT0_LO 0x00003073
+
+#define REG_A3XX_VBIF_PERF_CNT0_HI 0x00003074
+
+#define REG_A3XX_VBIF_PERF_CNT1_LO 0x00003075
+
+#define REG_A3XX_VBIF_PERF_CNT1_HI 0x00003076
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT0_LO 0x00003077
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT0_HI 0x00003078
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT1_LO 0x00003079
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT1_HI 0x0000307a
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT2_LO 0x0000307b
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT2_HI 0x0000307c
+
#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
@@ -2080,6 +2231,8 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
}
#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
+#define REG_A3XX_UNKNOWN_0EA6 0x00000ea6
+
#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4
#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5
@@ -2117,6 +2270,39 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
#define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9
#define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc
+#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
+#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
+#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
+#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
+#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
+#define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
+#define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+}
#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
@@ -2152,6 +2338,12 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
{
return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
}
+#define A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK 0x00700000
+#define A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT 20
+static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK;
+}
#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
#define REG_A3XX_TEX_SAMP_1 0x00000001
@@ -2170,6 +2362,7 @@ static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
#define REG_A3XX_TEX_CONST_0 0x00000000
#define A3XX_TEX_CONST_0_TILED 0x00000001
+#define A3XX_TEX_CONST_0_SRGB 0x00000004
#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
@@ -2206,6 +2399,7 @@ static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
{
return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
}
+#define A3XX_TEX_CONST_0_NOCONVERT 0x20000000
#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
#define A3XX_TEX_CONST_0_TYPE__SHIFT 30
static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 942e09d898a8..2773600c9488 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -392,13 +392,10 @@ static const unsigned int a3xx_registers[] = {
#ifdef CONFIG_DEBUG_FS
static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- struct drm_device *dev = gpu->dev;
int i;
adreno_show(gpu, m);
- mutex_lock(&dev->struct_mutex);
-
gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
@@ -418,8 +415,6 @@ static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
}
gpu->funcs->pm_suspend(gpu);
-
- mutex_unlock(&dev->struct_mutex);
}
#endif
@@ -685,6 +680,8 @@ static int a3xx_remove(struct platform_device *pdev)
}
static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,adreno-3xx" },
+ /* for backwards compat w/ downstream kgsl DT files: */
{ .compatible = "qcom,kgsl-3d0" },
{}
};
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index bb9a8ca0507b..85ff66cbddd6 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -19,6 +19,11 @@
#define __A3XX_GPU_H__
#include "adreno_gpu.h"
+
+/* argh, fb.h somehow gets pulled in, and its ROP_COPY/ROP_XOR collide with a3xx.xml.h: */
+#undef ROP_COPY
+#undef ROP_XOR
+
#include "a3xx.xml.h"
struct a3xx_gpu {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index d6e6ce2d1abd..9de19ac2e86c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <[email protected]> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -87,15 +87,6 @@ enum adreno_rb_blend_factor {
FACTOR_SRC_ALPHA_SATURATE = 16,
};
-enum adreno_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_MIN_DST_SRC = 2,
- BLEND_MAX_DST_SRC = 3,
- BLEND_DST_MINUS_SRC = 4,
- BLEND_DST_PLUS_SRC_BIAS = 5,
-};
-
enum adreno_rb_surface_endian {
ENDIAN_NONE = 0,
ENDIAN_8IN16 = 1,
@@ -116,6 +107,39 @@ enum adreno_rb_depth_format {
DEPTHX_24_8 = 1,
};
+enum adreno_rb_copy_control_mode {
+ RB_COPY_RESOLVE = 1,
+ RB_COPY_CLEAR = 2,
+ RB_COPY_DEPTH_STENCIL = 5,
+};
+
+enum a3xx_render_mode {
+ RB_RENDERING_PASS = 0,
+ RB_TILING_PASS = 1,
+ RB_RESOLVE_PASS = 2,
+ RB_COMPUTE_PASS = 3,
+};
+
+enum a3xx_msaa_samples {
+ MSAA_ONE = 0,
+ MSAA_TWO = 1,
+ MSAA_FOUR = 2,
+};
+
+enum a3xx_threadmode {
+ MULTI = 0,
+ SINGLE = 1,
+};
+
+enum a3xx_instrbuffermode {
+ BUFFER = 1,
+};
+
+enum a3xx_threadsize {
+ TWO_QUADS = 0,
+ FOUR_QUADS = 1,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
@@ -264,6 +288,8 @@ static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
#define REG_AXXX_CP_INT_ACK 0x000001f4
#define REG_AXXX_CP_ME_CNTL 0x000001f6
+#define AXXX_CP_ME_CNTL_BUSY 0x20000000
+#define AXXX_CP_ME_CNTL_HALT 0x10000000
#define REG_AXXX_CP_ME_STATUS 0x000001f7
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 28ca8cd8b09e..655ce5b14ad0 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -91,9 +91,17 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
DBG("%s", gpu->name);
+ ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+ if (ret) {
+ gpu->rb_iova = 0;
+ dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
+ return ret;
+ }
+
/* Setup REG_CP_RB_CNTL: */
gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
/* size is log2(quad-words): */
@@ -362,8 +370,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
+ mutex_lock(&drm->struct_mutex);
gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
MSM_BO_UNCACHED);
+ mutex_unlock(&drm->struct_mutex);
if (IS_ERR(gpu->memptrs_bo)) {
ret = PTR_ERR(gpu->memptrs_bo);
gpu->memptrs_bo = NULL;
@@ -371,13 +381,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
+ gpu->memptrs = msm_gem_vaddr(gpu->memptrs_bo);
if (!gpu->memptrs) {
dev_err(drm->dev, "could not vmap memptrs\n");
return -ENOMEM;
}
- ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
+ ret = msm_gem_get_iova(gpu->memptrs_bo, gpu->base.id,
&gpu->memptrs_iova);
if (ret) {
dev_err(drm->dev, "could not map memptrs: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index ae992c71703f..4eee0ec8f069 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <[email protected]> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -105,6 +105,7 @@ enum pc_di_index_size {
enum pc_di_vis_cull_mode {
IGNORE_VISIBILITY = 0,
+ USE_VISIBILITY = 1,
};
enum adreno_pm4_packet_type {
@@ -163,6 +164,11 @@ enum adreno_pm4_type3_packets {
CP_SET_BIN = 76,
CP_TEST_TWO_MEMS = 113,
CP_WAIT_FOR_ME = 19,
+ CP_SET_DRAW_STATE = 67,
+ CP_DRAW_INDX_OFFSET = 56,
+ CP_DRAW_INDIRECT = 40,
+ CP_DRAW_INDX_INDIRECT = 41,
+ CP_DRAW_AUTO = 36,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -232,6 +238,211 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
}
+#define REG_CP_DRAW_INDX_0 0x00000000
+#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
+#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_0_VIZ_QUERY(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_0_VIZ_QUERY__MASK;
+}
+
+#define REG_CP_DRAW_INDX_1 0x00000001
+#define CP_DRAW_INDX_1_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_1_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_1_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_1_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_1_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_1_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_1_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_1_VIS_CULL__MASK 0x00000600
+#define CP_DRAW_INDX_1_VIS_CULL__SHIFT 9
+static inline uint32_t CP_DRAW_INDX_1_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_1_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_1_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_1_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_1_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_1_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_1_NUM_INDICES__MASK 0xffff0000
+#define CP_DRAW_INDX_1_NUM_INDICES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_1_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_1_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_2_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_INDX_BASE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_2_INDX_BASE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_2_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_INDX_SIZE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_2_INDX_SIZE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_0 0x00000000
+#define CP_DRAW_INDX_2_0_VIZ_QUERY__MASK 0xffffffff
+#define CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_0_VIZ_QUERY(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_2_0_VIZ_QUERY__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_1 0x00000001
+#define CP_DRAW_INDX_2_1_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_1_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_2_1_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_2_1_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_2_1_VIS_CULL__MASK 0x00000600
+#define CP_DRAW_INDX_2_1_VIS_CULL__SHIFT 9
+static inline uint32_t CP_DRAW_INDX_2_1_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_2_1_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_2_1_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_2_1_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_2_1_NUM_INDICES__MASK 0xffff0000
+#define CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_2_1_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_2 0x00000002
+#define CP_DRAW_INDX_2_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_0 0x00000000
+#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000700
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK 0xffff0000
+#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_BASE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_SIZE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK;
+}
+
+#define REG_CP_SET_DRAW_STATE_0 0x00000000
+#define CP_SET_DRAW_STATE_0_COUNT__MASK 0x0000ffff
+#define CP_SET_DRAW_STATE_0_COUNT__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE_0_COUNT(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE_0_COUNT__SHIFT) & CP_SET_DRAW_STATE_0_COUNT__MASK;
+}
+#define CP_SET_DRAW_STATE_0_DIRTY 0x00010000
+#define CP_SET_DRAW_STATE_0_DISABLE 0x00020000
+#define CP_SET_DRAW_STATE_0_DISABLE_ALL_GROUPS 0x00040000
+#define CP_SET_DRAW_STATE_0_LOAD_IMMED 0x00080000
+#define CP_SET_DRAW_STATE_0_GROUP_ID__MASK 0x1f000000
+#define CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT 24
+static inline uint32_t CP_SET_DRAW_STATE_0_GROUP_ID(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE_0_GROUP_ID__MASK;
+}
+
+#define REG_CP_SET_DRAW_STATE_1 0x00000001
+#define CP_SET_DRAW_STATE_1_ADDR__MASK 0xffffffff
+#define CP_SET_DRAW_STATE_1_ADDR__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE_1_ADDR(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE_1_ADDR__SHIFT) & CP_SET_DRAW_STATE_1_ADDR__MASK;
+}
+
#define REG_CP_SET_BIN_0 0x00000000
#define REG_CP_SET_BIN_1 0x00000001
@@ -262,5 +473,21 @@ static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
}
+#define REG_CP_SET_BIN_DATA_0 0x00000000
+#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK 0xffffffff
+#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA_0_BIN_DATA_ADDR(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT) & CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA_1 0x00000001
+#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK 0xffffffff
+#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
+}
+
#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 87be647e3825..0f1f5b9459a5 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <[email protected]> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 747a6ef4211f..d468f86f637c 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <[email protected]> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 48e03acf19bf..da8740054cdf 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <[email protected]> (robclark)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 7f7aadef8a82..a125a7e32742 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -123,7 +123,8 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
for (i = 0; i < config->hpd_reg_cnt; i++) {
struct regulator *reg;
- reg = devm_regulator_get(&pdev->dev, config->hpd_reg_names[i]);
+ reg = devm_regulator_get_exclusive(&pdev->dev,
+ config->hpd_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
@@ -138,7 +139,8 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
for (i = 0; i < config->pwr_reg_cnt; i++) {
struct regulator *reg;
- reg = devm_regulator_get(&pdev->dev, config->pwr_reg_names[i]);
+ reg = devm_regulator_get_exclusive(&pdev->dev,
+ config->pwr_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
@@ -266,37 +268,56 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
int gpio = of_get_named_gpio(of_node, name, 0);
if (gpio < 0) {
- dev_err(dev, "failed to get gpio: %s (%d)\n",
- name, gpio);
- gpio = -1;
+ char name2[32];
+ snprintf(name2, sizeof(name2), "%s-gpio", name);
+ gpio = of_get_named_gpio(of_node, name2, 0);
+ if (gpio < 0) {
+ dev_err(dev, "failed to get gpio: %s (%d)\n",
+ name, gpio);
+ gpio = -1;
+ }
}
return gpio;
}
- /* TODO actually use DT.. */
- static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
- static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
- static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
- static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
- static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
+ if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
+ static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
+ static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
+ static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+ static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
+ static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
+ config.phy_init = hdmi_phy_8x74_init;
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.pwr_reg_names = pwr_reg_names;
+ config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_freq = hpd_clk_freq;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+ config.pwr_clk_names = pwr_clk_names;
+ config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
+ config.shared_irq = true;
+ } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
+ static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
+ static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};
+ config.phy_init = hdmi_phy_8960_init;
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+ } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8660")) {
+ config.phy_init = hdmi_phy_8x60_init;
+ } else {
+ dev_err(dev, "unknown phy: %s\n", of_node->name);
+ }
- config.phy_init = hdmi_phy_8x74_init;
config.mmio_name = "core_physical";
- config.hpd_reg_names = hpd_reg_names;
- config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
- config.pwr_reg_names = pwr_reg_names;
- config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
- config.hpd_clk_names = hpd_clk_names;
- config.hpd_freq = hpd_clk_freq;
- config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
- config.pwr_clk_names = pwr_clk_names;
- config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk");
config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en");
config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel");
- config.shared_irq = true;
+ config.mux_lpm_gpio = get_gpio("qcom,hdmi-tx-mux-lpm");
#else
static const char *hpd_clk_names[] = {
@@ -373,7 +394,9 @@ static int hdmi_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,hdmi-tx" },
+ { .compatible = "qcom,hdmi-tx-8074" },
+ { .compatible = "qcom,hdmi-tx-8960" },
+ { .compatible = "qcom,hdmi-tx-8660" },
{}
};
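For context, a match table like this is normally referenced from the platform driver and exported for module autoloading. A hedged sketch of that wiring; hdmi_dev_remove() appears in the hunk header above, while the probe and driver names here are assumptions:

MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver hdmi_driver = {
	.probe  = hdmi_dev_probe,	/* assumed name */
	.remove = hdmi_dev_remove,	/* seen in the hunk above */
	.driver = {
		.name = "hdmi_msm",	/* assumed name */
		.of_match_table = dt_match,
	},
};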
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 9d7723c6528a..b981995410b5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -96,6 +96,7 @@ struct hdmi_platform_config {
/* gpio's: */
int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+ int mux_lpm_gpio;
 /* older devices had their own irq; on mdp5+ it is shared with the mdp: */
bool shared_irq;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index e2636582cfd7..e89fe053d375 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -12,14 +12,14 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <[email protected]> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -148,9 +148,9 @@ static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*
static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
-static inline uint32_t REG_HDMI_ACR(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+static inline uint32_t REG_HDMI_ACR(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
-static inline uint32_t REG_HDMI_ACR_0(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+static inline uint32_t REG_HDMI_ACR_0(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
#define HDMI_ACR_0_CTS__MASK 0xfffff000
#define HDMI_ACR_0_CTS__SHIFT 12
static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
@@ -158,7 +158,7 @@ static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK;
}
-static inline uint32_t REG_HDMI_ACR_1(uint32_t i0) { return 0x000000c8 + 0x8*i0; }
+static inline uint32_t REG_HDMI_ACR_1(enum hdmi_acr_cts i0) { return 0x000000c8 + 0x8*i0; }
#define HDMI_ACR_1_N__MASK 0xffffffff
#define HDMI_ACR_1_N__SHIFT 0
static inline uint32_t HDMI_ACR_1_N(uint32_t val)
@@ -552,6 +552,103 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
#define REG_HDMI_8960_PHY_REG11 0x0000042c
#define REG_HDMI_8960_PHY_REG12 0x00000430
+#define HDMI_8960_PHY_REG12_SW_RESET 0x00000020
+#define HDMI_8960_PHY_REG12_PWRDN_B 0x00000080
+
+#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000434
+
+#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000438
+
+#define REG_HDMI_8960_PHY_REG_MISC0 0x0000043c
+
+#define REG_HDMI_8960_PHY_REG13 0x00000440
+
+#define REG_HDMI_8960_PHY_REG14 0x00000444
+
+#define REG_HDMI_8960_PHY_REG15 0x00000448
+
+#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000500
+
+#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000504
+
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000508
+
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000050c
+
+#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000510
+
+#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000514
+
+#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000518
+#define HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL 0x00000002
+#define HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B 0x00000008
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000051c
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000520
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000524
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000528
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000052c
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000530
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000534
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000538
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000053c
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000540
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000544
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000548
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000054c
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000550
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000554
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000558
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000055c
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000560
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000564
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000568
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000056c
+
+#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000570
+
+#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000574
+
+#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000578
+
+#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000057c
+
+#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000580
+
+#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000584
+
+#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000588
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000058c
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000590
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000594
+
+#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000598
+#define HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK 0x00000001
+
+#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000059c
#define REG_HDMI_8x74_ANA_CFG0 0x00000000
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 76960faae38f..4aca2a3c667c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -63,7 +63,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
if (ret) {
dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
- "HDMI_MUX_SEL", config->mux_en_gpio, ret);
+ "HDMI_MUX_EN", config->mux_en_gpio, ret);
goto error4;
}
gpio_set_value_cansleep(config->mux_en_gpio, 1);
@@ -78,6 +78,19 @@ static int gpio_config(struct hdmi *hdmi, bool on)
}
gpio_set_value_cansleep(config->mux_sel_gpio, 0);
}
+
+ if (config->mux_lpm_gpio != -1) {
+ ret = gpio_request(config->mux_lpm_gpio,
+ "HDMI_MUX_LPM");
+ if (ret) {
+ dev_err(dev->dev,
+ "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_LPM",
+ config->mux_lpm_gpio, ret);
+ goto error6;
+ }
+ gpio_set_value_cansleep(config->mux_lpm_gpio, 1);
+ }
DBG("gpio on");
} else {
gpio_free(config->ddc_clk_gpio);
@@ -93,11 +106,19 @@ static int gpio_config(struct hdmi *hdmi, bool on)
gpio_set_value_cansleep(config->mux_sel_gpio, 1);
gpio_free(config->mux_sel_gpio);
}
+
+ if (config->mux_lpm_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_lpm_gpio, 0);
+ gpio_free(config->mux_lpm_gpio);
+ }
DBG("gpio off");
}
return 0;
+error6:
+ if (config->mux_sel_gpio != -1)
+ gpio_free(config->mux_sel_gpio);
error5:
if (config->mux_en_gpio != -1)
gpio_free(config->mux_en_gpio);
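The new error6 label slots into the driver's existing unwind ladder: a failure at step N jumps to the label that releases steps N-1 downward, in reverse order of acquisition. The idiom, schematically, with illustrative names:

static int request_two_gpios(unsigned a, unsigned b)
{
	int ret;

	ret = gpio_request(a, "A");
	if (ret)
		return ret;
	ret = gpio_request(b, "B");
	if (ret)
		goto err_free_a;	/* undo only what already succeeded */
	return 0;

err_free_a:
	gpio_free(a);
	return ret;
}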
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index e5b7ed5b8f01..902d7685d441 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -15,13 +15,370 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
#include "hdmi.h"
struct hdmi_phy_8960 {
struct hdmi_phy base;
struct hdmi *hdmi;
+ struct clk_hw pll_hw;
+ struct clk *pll;
+ unsigned long pixclk;
};
#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
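clk_to_phy() is the standard container_of() idiom: from a pointer to the embedded clk_hw it recovers the enclosing wrapper. A generic sketch of the pattern, assuming linux/clk-provider.h for struct clk_hw, with illustrative names:

struct wrapper {
	int state;
	struct clk_hw hw;	/* embedded member */
};

static struct wrapper *hw_to_wrapper(struct clk_hw *hw)
{
	/* subtract the member's offset within the struct
	 * from the member's address */
	return container_of(hw, struct wrapper, hw);
}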
+
+/*
+ * HDMI PLL:
+ *
+ * To get the parent clock set up properly, we plug the HDMI PLL
+ * configuration into the common clock framework.
+ */
+
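Once registered (see hdmi_phy_8960_init() below), the PLL looks like any other clock to consumers. A minimal consumer-side sketch, assuming a matching clock lookup exists; the lookup name here is illustrative:

static int hdmi_pll_consumer_sketch(struct device *dev)
{
	struct clk *pll = clk_get(dev, "extp_clk");	/* assumed lookup name */

	if (IS_ERR(pll))
		return PTR_ERR(pll);
	clk_set_rate(pll, 74250000);	/* snapped by the round_rate op below */
	return clk_prepare_enable(pll);	/* reaches the enable op below */
}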
+struct pll_rate {
+ unsigned long rate;
+ struct {
+ uint32_t val;
+ uint32_t reg;
+ } conf[32];
+};
+
+/* NOTE: keep sorted highest freq to lowest: */
+static const struct pll_rate freqtbl[] = {
+ /* 1080p60/1080p50 case */
+ { 148500000, {
+ { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ { 0, 0 } }
+ },
+ { 108000000, {
+ { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0, 0 } }
+ },
+ /* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */
+ { 74250000, {
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0, 0 } }
+ },
+ { 65000000, {
+ { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0, 0 } }
+ },
+ /* 480p60/480i60 */
+ { 27030000, {
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ { 0, 0 } }
+ },
+ /* 576p50/576i50 */
+ { 27000000, {
+ { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ { 0, 0 } }
+ },
+ /* 640x480p60 */
+ { 25200000, {
+ { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ { 0, 0 } }
+ },
+};
+
+static int hdmi_pll_enable(struct clk_hw *hw)
+{
+ struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
+ struct hdmi *hdmi = phy_8960->hdmi;
+ int timeout_count, pll_lock_retry = 10;
+ unsigned int val;
+
+ DBG("");
+
+ /* Assert PLL S/W reset */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a);
+
+ /* Wait briefly before de-asserting, to allow the hardware to
+ * complete the reset; this delay is ample for the assert and
+ * de-assert cycle.
+ */
+ udelay(10);
+
+ /* De-assert PLL S/W reset */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
+ val |= HDMI_8960_PHY_REG12_SW_RESET;
+ /* Assert PHY S/W reset */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
+ val &= ~HDMI_8960_PHY_REG12_SW_RESET;
+ /* Wait briefly before de-asserting, to allow the hardware to
+ * complete the reset; this delay is ample for the assert and
+ * de-assert cycle. */
+ udelay(10);
+ /* De-assert PHY S/W reset */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x3f);
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
+ val |= HDMI_8960_PHY_REG12_PWRDN_B;
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
+ /* Wait 10 us for enabling global power for PHY */
+ mb();
+ udelay(10);
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B);
+ val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B;
+ val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL;
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x80);
+
+ timeout_count = 1000;
+ while (--pll_lock_retry > 0) {
+
+ /* are we there yet? */
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_STATUS0);
+ if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK)
+ break;
+
+ udelay(1);
+
+ if (--timeout_count > 0)
+ continue;
+
+ /*
+ * PLL has still not locked: do a software reset and try
+ * again, asserting the PLL S/W reset first.
+ */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
+ udelay(10);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
+
+ /*
+ * Wait for a short duration for the PLL calibration
+ * before checking if the PLL gets locked
+ */
+ udelay(350);
+
+ timeout_count = 1000;
+ }
+
+ return 0;
+}
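The inner wait above is an open-coded poll-with-timeout; factored out, it is the common "spin until a status bit sets" helper. A minimal sketch under the same register definitions, with illustrative timings:

/* Sketch: poll PLL_STATUS0 for the lock bit, roughly 1us per try. */
static bool hdmi_pll_poll_lock(struct hdmi *hdmi, int timeout_us)
{
	while (timeout_us-- > 0) {
		if (hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_STATUS0) &
				HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK)
			return true;
		udelay(1);
	}
	return false;
}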
+
+static void hdmi_pll_disable(struct clk_hw *hw)
+{
+ struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
+ struct hdmi *hdmi = phy_8960->hdmi;
+ unsigned int val;
+
+ DBG("");
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
+ val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B);
+ /* mirror the enable path: assert PD_PLL, drop PLL_PWRDN_B */
+ val |= HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL;
+ val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B;
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
+ /* Make sure HDMI PHY/PLL are powered down */
+ mb();
+}
+
+static const struct pll_rate *find_rate(unsigned long rate)
+{
+ int i;
+ for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
+ if (rate > freqtbl[i].rate)
+ return &freqtbl[i-1];
+ return &freqtbl[i-1];
+}
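Given the descending sort, the loop returns the smallest table entry whose rate still covers the request; some worked examples against the table above:

/* Worked examples (table is sorted highest to lowest):
 *   find_rate(74250000)  -> 74250000 entry (exact hit)
 *   find_rate(100000000) -> 108000000 entry (snaps up)
 *   find_rate(200000000) -> 148500000 entry (capped at the max)
 *   find_rate(10000000)  -> 25200000 entry (falls off the end)
 */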
+
+static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
+ return phy_8960->pixclk;
+}
+
+static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct pll_rate *pll_rate = find_rate(rate);
+ return pll_rate->rate;
+}
+
+static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
+ struct hdmi *hdmi = phy_8960->hdmi;
+ const struct pll_rate *pll_rate = find_rate(rate);
+ int i;
+
+ DBG("rate=%lu", rate);
+
+ for (i = 0; pll_rate->conf[i].reg; i++)
+ hdmi_write(hdmi, pll_rate->conf[i].reg, pll_rate->conf[i].val);
+
+ phy_8960->pixclk = rate;
+
+ return 0;
+}
+
+
+static const struct clk_ops hdmi_pll_ops = {
+ .enable = hdmi_pll_enable,
+ .disable = hdmi_pll_disable,
+ .recalc_rate = hdmi_pll_recalc_rate,
+ .round_rate = hdmi_pll_round_rate,
+ .set_rate = hdmi_pll_set_rate,
+};
+
+static const char *hdmi_pll_parents[] = {
+ "pxo",
+};
+
+static struct clk_init_data pll_init = {
+ .name = "hdmi_pll",
+ .ops = &hdmi_pll_ops,
+ .parent_names = hdmi_pll_parents,
+ .num_parents = ARRAY_SIZE(hdmi_pll_parents),
+};
+
+
+/*
+ * HDMI Phy:
+ */
static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
{
@@ -86,6 +443,9 @@ static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
struct hdmi *hdmi = phy_8960->hdmi;
+ DBG("pixclock: %lu", pixclock);
+
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x00);
hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b);
hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2);
hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00);
@@ -104,6 +464,8 @@ static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
struct hdmi *hdmi = phy_8960->hdmi;
+ DBG("");
+
hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f);
}
@@ -118,7 +480,12 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
{
struct hdmi_phy_8960 *phy_8960;
struct hdmi_phy *phy = NULL;
- int ret;
+ int ret, i;
+
+ /* sanity check: */
+ for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
+ if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
+ return ERR_PTR(-EINVAL);
phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
if (!phy_8960) {
@@ -132,6 +499,14 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
phy_8960->hdmi = hdmi;
+ phy_8960->pll_hw.init = &pll_init;
+ phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
+ if (IS_ERR(phy_8960->pll)) {
+ ret = PTR_ERR(phy_8960->pll);
+ phy_8960->pll = NULL;
+ goto fail;
+ }
+
return phy;
fail:
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index d591567173c4..bd81db6a7829 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <[email protected]> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 416a26e1e58d..122208e8a2ee 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <[email protected]> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 0bb4faa17523..733646c0d3f8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -147,7 +147,7 @@ static void mdp4_destroy(struct msm_kms *kms)
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
if (mdp4_kms->blank_cursor_bo)
- drm_gem_object_unreference(mdp4_kms->blank_cursor_bo);
+ drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
kfree(mdp4_kms);
}
@@ -176,6 +176,8 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms)
if (mdp4_kms->pclk)
clk_disable_unprepare(mdp4_kms->pclk);
clk_disable_unprepare(mdp4_kms->lut_clk);
+ if (mdp4_kms->axi_clk)
+ clk_disable_unprepare(mdp4_kms->axi_clk);
return 0;
}
@@ -188,6 +190,8 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
if (mdp4_kms->pclk)
clk_prepare_enable(mdp4_kms->pclk);
clk_prepare_enable(mdp4_kms->lut_clk);
+ if (mdp4_kms->axi_clk)
+ clk_prepare_enable(mdp4_kms->axi_clk);
return 0;
}
@@ -294,15 +298,17 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
+ mdp4_kms->dsi_pll_vdda =
+ devm_regulator_get_optional(&pdev->dev, "dsi_pll_vdda");
if (IS_ERR(mdp4_kms->dsi_pll_vdda))
mdp4_kms->dsi_pll_vdda = NULL;
- mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
+ mdp4_kms->dsi_pll_vddio =
+ devm_regulator_get_optional(&pdev->dev, "dsi_pll_vddio");
if (IS_ERR(mdp4_kms->dsi_pll_vddio))
mdp4_kms->dsi_pll_vddio = NULL;
- mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
if (IS_ERR(mdp4_kms->vdd))
mdp4_kms->vdd = NULL;
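With the _optional variant, "not wired up" comes back as an ERR_PTR, which the driver collapses to NULL; downstream code can then guard on the pointer alone. A minimal sketch of the consuming side, illustrative rather than from this patch:

/* Sketch: enable an optional supply only when it was found. */
if (mdp4_kms->dsi_pll_vdda) {
	ret = regulator_enable(mdp4_kms->dsi_pll_vdda);
	if (ret)
		return ret;
}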
@@ -333,6 +339,13 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
+ mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk");
+ if (IS_ERR(mdp4_kms->axi_clk)) {
+ dev_err(dev->dev, "failed to get axi_clk\n");
+ ret = PTR_ERR(mdp4_kms->axi_clk);
+ goto fail;
+ }
+
clk_set_rate(mdp4_kms->clk, config->max_clk);
clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
@@ -348,7 +361,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(dev, config->iommu);
+ mmu = msm_iommu_new(&pdev->dev, config->iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
goto fail;
@@ -406,6 +419,8 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
static struct mdp4_platform_config config = {};
#ifdef CONFIG_OF
/* TODO */
+ config.max_clk = 266667000;
+ config.iommu = iommu_domain_alloc(&platform_bus_type);
#else
if (cpu_is_apq8064())
config.max_clk = 266667000;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 715520c54cde..3225da804c61 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -42,6 +42,7 @@ struct mdp4_kms {
struct clk *clk;
struct clk *pclk;
struct clk *lut_clk;
+ struct clk *axi_clk;
struct mdp_irq error_handler;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 0aa51517f826..67f4f896ba8c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -12,14 +12,14 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <[email protected]> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -68,6 +68,8 @@ enum mdp5_pipe {
SSPP_RGB2 = 5,
SSPP_DMA0 = 6,
SSPP_DMA1 = 7,
+ SSPP_VIG3 = 8,
+ SSPP_RGB3 = 9,
};
enum mdp5_ctl_mode {
@@ -126,7 +128,11 @@ enum mdp5_client_id {
CID_RGB0 = 16,
CID_RGB1 = 17,
CID_RGB2 = 18,
- CID_MAX = 19,
+ CID_VIG3_Y = 19,
+ CID_VIG3_CR = 20,
+ CID_VIG3_CB = 21,
+ CID_RGB3 = 22,
+ CID_MAX = 23,
};
enum mdp5_igc_type {
@@ -299,11 +305,34 @@ static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
-static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000600 + 0x100*i0; }
+static inline uint32_t __offset_CTL(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->ctl.base[0]);
+ case 1: return (mdp5_cfg->ctl.base[1]);
+ case 2: return (mdp5_cfg->ctl.base[2]);
+ case 3: return (mdp5_cfg->ctl.base[3]);
+ case 4: return (mdp5_cfg->ctl.base[4]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000000 + __offset_CTL(i0); }
-static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+static inline uint32_t __offset_LAYER(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000000;
+ case 1: return 0x00000004;
+ case 2: return 0x00000008;
+ case 3: return 0x0000000c;
+ case 4: return 0x00000010;
+ case 5: return 0x00000024;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
-static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
@@ -354,8 +383,20 @@ static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
}
#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000
#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
+#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000
+#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000
+#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
+}
-static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000614 + 0x100*i0; }
+static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000014 + __offset_CTL(i0); }
#define MDP5_CTL_OP_MODE__MASK 0x0000000f
#define MDP5_CTL_OP_MODE__SHIFT 0
static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
@@ -377,7 +418,7 @@ static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
}
-static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x100*i0; }
+static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __offset_CTL(i0); }
#define MDP5_CTL_FLUSH_VIG0 0x00000001
#define MDP5_CTL_FLUSH_VIG1 0x00000002
#define MDP5_CTL_FLUSH_VIG2 0x00000004
@@ -387,26 +428,48 @@ static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x1
#define MDP5_CTL_FLUSH_LM0 0x00000040
#define MDP5_CTL_FLUSH_LM1 0x00000080
#define MDP5_CTL_FLUSH_LM2 0x00000100
+#define MDP5_CTL_FLUSH_LM3 0x00000200
+#define MDP5_CTL_FLUSH_LM4 0x00000400
#define MDP5_CTL_FLUSH_DMA0 0x00000800
#define MDP5_CTL_FLUSH_DMA1 0x00001000
#define MDP5_CTL_FLUSH_DSPP0 0x00002000
#define MDP5_CTL_FLUSH_DSPP1 0x00004000
#define MDP5_CTL_FLUSH_DSPP2 0x00008000
#define MDP5_CTL_FLUSH_CTL 0x00020000
+#define MDP5_CTL_FLUSH_VIG3 0x00040000
+#define MDP5_CTL_FLUSH_RGB3 0x00080000
+#define MDP5_CTL_FLUSH_LM5 0x00100000
+#define MDP5_CTL_FLUSH_DSPP3 0x00200000
-static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000061c + 0x100*i0; }
+static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); }
-static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000620 + 0x100*i0; }
+static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
-static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
+{
+ switch (idx) {
+ case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]);
+ case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]);
+ case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]);
+ case SSPP_RGB0: return (mdp5_cfg->pipe_rgb.base[0]);
+ case SSPP_RGB1: return (mdp5_cfg->pipe_rgb.base[1]);
+ case SSPP_RGB2: return (mdp5_cfg->pipe_rgb.base[2]);
+ case SSPP_DMA0: return (mdp5_cfg->pipe_dma.base[0]);
+ case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]);
+ case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]);
+ case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
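The generated accessors now go through __offset_*() lookups instead of hard-coded strides, so one header can serve chips whose blocks sit at different bases; mdp5_cfg is assumed to be a per-SoC table filled in at probe time. A sketch of what such a table might look like; the field names are illustrative, and the base values simply mirror the old hard-coded strides visible in the removed lines:

/* Sketch: a per-SoC layout table consulted by __offset_CTL() et al. */
struct mdp5_sub_block {
	uint32_t base[8];	/* per-instance base addresses */
};

struct mdp5_config {
	struct mdp5_sub_block ctl;
	struct mdp5_sub_block pipe_vig;
	struct mdp5_sub_block pipe_rgb;
	struct mdp5_sub_block pipe_dma;
	struct mdp5_sub_block lm;
};

static const struct mdp5_config first_gen_cfg = {
	.ctl      = { .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 } },
	.pipe_vig = { .base = { 0x01200, 0x01600, 0x01a00 } },
	.pipe_rgb = { .base = { 0x01e00, 0x02200, 0x02600 } },
	.pipe_dma = { .base = { 0x02a00, 0x02e00 } },
	.lm       = { .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 } },
};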
-static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000014c4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000014f0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00001500 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
@@ -420,7 +483,7 @@ static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00001204 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00000004 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
@@ -434,7 +497,7 @@ static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00001208 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00000008 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000
#define MDP5_PIPE_SRC_XY_Y__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
@@ -448,7 +511,7 @@ static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000120c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000000c + __offset_PIPE(i0); }
#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
@@ -462,7 +525,7 @@ static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00001210 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00000010 + __offset_PIPE(i0); }
#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000
#define MDP5_PIPE_OUT_XY_Y__SHIFT 16
static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
@@ -476,15 +539,15 @@ static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00001214 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00000014 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00001218 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00000018 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000121c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000001c + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00001220 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00000020 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00001224 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00000024 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
@@ -498,7 +561,7 @@ static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00001228 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00000028 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
@@ -512,9 +575,9 @@ static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000122c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000002c + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00001230 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00000030 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
@@ -568,7 +631,7 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_ty
return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00001234 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00000034 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
@@ -594,7 +657,7 @@ static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00001238 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00000038 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001
#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006
#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1
@@ -610,29 +673,29 @@ static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
-static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000123c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00001248 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00000048 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000124c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000004c + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00001250 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00000050 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00001254 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00000054 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00001258 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00000058 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00001270 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00000070 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000012a4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000000a4 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000012a8 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000000a8 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000012ac + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000000ac + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000012b0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000000b0 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000012b4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000000b4 + __offset_PIPE(i0); }
#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
@@ -646,7 +709,7 @@ static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00001404 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300
@@ -686,23 +749,34 @@ static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_
return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00001410 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00001414 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00001420 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00001424 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+static inline uint32_t __offset_LM(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->lm.base[0]);
+ case 1: return (mdp5_cfg->lm.base[1]);
+ case 2: return (mdp5_cfg->lm.base[2]);
+ case 3: return (mdp5_cfg->lm.base[3]);
+ case 4: return (mdp5_cfg->lm.base[4]);
+ default: return INVALID_IDX(idx);
+ }
+}
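/*
 * Editor's note (illustrative, not part of the generated header): the
 * per-instance base now comes from the runtime mdp5_cfg lookup instead
 * of a hard-coded 0x400 stride, so e.g.
 *
 *   REG_MDP5_LM_OUT_SIZE(1) == 0x00000004 + mdp5_cfg->lm.base[1]
 *
 * which with the msm8x74 config below (lm.base[1] = 0x03600) yields the
 * same 0x03604 the old constant form produced.
 */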
+static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
-static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00003204 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); }
#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
@@ -716,13 +790,13 @@ static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00003208 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00000008 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00003210 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
@@ -744,57 +818,67 @@ static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
-static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003224 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003228 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000322c + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000002c + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003230 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000030 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003234 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000034 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003238 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000038 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000323c + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000003c + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003240 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000040 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003244 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000044 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003248 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000032e0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000032e4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000032e8 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000032dc + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000032ec + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000032f0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000032f4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000032f8 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000032fc + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00003300 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00000100 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00003304 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00000104 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00003308 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00000108 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000330c + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000010c + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00003310 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00000110 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+static inline uint32_t __offset_DSPP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->dspp.base[0]);
+ case 1: return (mdp5_cfg->dspp.base[1]);
+ case 2: return (mdp5_cfg->dspp.base[2]);
+ case 3: return (mdp5_cfg->dspp.base[3]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
@@ -811,29 +895,40 @@ static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000
-static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00004630 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00000030 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00004750 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00000150 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00004810 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00000210 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00004830 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00000230 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00004834 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00000234 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00004838 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00000238 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000048dc + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000048b0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+static inline uint32_t __offset_INTF(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->intf.base[0]);
+ case 1: return (mdp5_cfg->intf.base[1]);
+ case 2: return (mdp5_cfg->intf.base[2]);
+ case 3: return (mdp5_cfg->intf.base[3]);
+ case 4: return (mdp5_cfg->intf.base[4]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00012504 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00000004 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00012508 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00000008 + __offset_INTF(i0); }
#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff
#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0
static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
@@ -847,23 +942,23 @@ static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
}
-static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0001250c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0000000c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00012510 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00000010 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00012514 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00000014 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00012518 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00000018 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0001251c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0000001c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00012520 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00000020 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00012524 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00000024 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00012528 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00000028 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0001252c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0000002c + __offset_INTF(i0); }
#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff
#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
@@ -872,7 +967,7 @@ static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
}
#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00012530 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00000030 + __offset_INTF(i0); }
#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff
#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
@@ -880,11 +975,11 @@ static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
}
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00012534 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00000034 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00012538 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00000038 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0001253c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0000003c + __offset_INTF(i0); }
#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff
#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0
static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
@@ -898,7 +993,7 @@ static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
}
-static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00012540 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00000040 + __offset_INTF(i0); }
#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff
#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
@@ -913,124 +1008,132 @@ static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
}
#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000
-static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00012544 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00000044 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00012548 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00000048 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0001254c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0000004c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00012550 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00000050 + __offset_INTF(i0); }
#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001
#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002
#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004
-static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00012554 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00000054 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00012558 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00000058 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0001255c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0000005c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00012584 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00000084 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00012590 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00000090 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000125a8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000000a8 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000125ac + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000000ac + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000125b0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000000b0 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000125f0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000000f0 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000125f4 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000000f4 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000125f8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000000f8 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00012600 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00000100 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00012604 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00000104 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00012608 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00000108 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0001260c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0000010c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00012610 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00000110 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00012614 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00000114 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00012618 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00000118 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0001261c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0000011c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+static inline uint32_t __offset_AD(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->ad.base[0]);
+ case 1: return (mdp5_cfg->ad.base[1]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00013104 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00000004 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00013108 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00000008 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0001310c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0000000c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00013110 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00000010 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00013114 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00000014 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00013118 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00000018 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0001311c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0000001c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00013120 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00000020 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00013124 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00000024 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00013128 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00000028 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0001312c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0000002c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00013130 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00000030 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00013134 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00000034 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00013138 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00000038 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0001317c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0000007c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000131c8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000000c8 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000131cc + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000000cc + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000131d0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000000d0 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000131d4 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000000d4 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000131d8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000000d8 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000131dc + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000000dc + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000131e0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000000e0 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000131e8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000000e8 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000131ec + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000000ec + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000131f0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000000f0 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000131f4 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000000f4 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000131f8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000000f8 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00013200 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00000100 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00013244 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00000144 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00013248 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00000148 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0001324c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0000014c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00013254 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00000154 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00013258 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00000158 + __offset_AD(i0); }
#endif /* MDP5_XML */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 71510ee26e96..31a2c6331a1d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -26,14 +26,98 @@ static const char *iommu_ports[] = {
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
-static int mdp5_hw_init(struct msm_kms *kms)
+const struct mdp5_config *mdp5_cfg;
+
+static const struct mdp5_config msm8x74_config = {
+ .name = "msm8x74",
+ .ctl = {
+ .count = 5,
+ .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ },
+ .pipe_vig = {
+ .count = 3,
+ .base = { 0x01200, 0x01600, 0x01a00 },
+ },
+ .pipe_rgb = {
+ .count = 3,
+ .base = { 0x01e00, 0x02200, 0x02600 },
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x02a00, 0x02e00 },
+ },
+ .lm = {
+ .count = 5,
+ .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+ },
+ .dspp = {
+ .count = 3,
+ .base = { 0x04600, 0x04a00, 0x04e00 },
+ },
+ .ad = {
+ .count = 2,
+ .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+ },
+ .intf = {
+ .count = 4,
+ .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+ },
+};
+
+static const struct mdp5_config apq8084_config = {
+ .name = "apq8084",
+ .ctl = {
+ .count = 5,
+ .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x03200, 0x03600 },
+ },
+ .lm = {
+ .count = 6,
+ .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+ },
+ .dspp = {
+ .count = 4,
+ .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+	},
+ .ad = {
+ .count = 3,
+ .base = { 0x13500, 0x13700, 0x13900 },
+ },
+ .intf = {
+ .count = 5,
+ .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+ },
+};
+
+struct mdp5_config_entry {
+ int revision;
+ const struct mdp5_config *config;
+};
+
+static const struct mdp5_config_entry mdp5_configs[] = {
+ { .revision = 0, .config = &msm8x74_config },
+ { .revision = 2, .config = &msm8x74_config },
+ { .revision = 3, .config = &apq8084_config },
+};
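/*
 * Editor's note (hypothetical example): supporting a further minor
 * revision would only need another table entry, e.g.
 *
 *   { .revision = 6, .config = &msm8x94_config },   (hypothetical SoC)
 *
 * plus an mdp5_config describing that part's block layout.
 */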
+
+static int mdp5_select_hw_cfg(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp5_kms->dev;
uint32_t version, major, minor;
- int ret = 0;
-
- pm_runtime_get_sync(dev->dev);
+ int i, ret = 0;
mdp5_enable(mdp5_kms);
version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
@@ -44,8 +128,8 @@ static int mdp5_hw_init(struct msm_kms *kms)
DBG("found MDP5 version v%d.%d", major, minor);
- if ((major != 1) || ((minor != 0) && (minor != 2))) {
- dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+ if (major != 1) {
+ dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
@@ -53,6 +137,35 @@ static int mdp5_hw_init(struct msm_kms *kms)
mdp5_kms->rev = minor;
+	/* mdp5_cfg must be initialized before the hw can be accessed via the generated register macros */
+ for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
+ if (mdp5_configs[i].revision != minor)
+ continue;
+ mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
+ break;
+ }
+ if (unlikely(!mdp5_kms->hw_cfg)) {
+ dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
+
+ return 0;
+out:
+ return ret;
+}
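/*
 * Editor's note: mdp5_select_hw_cfg() has to run before any register
 * macro that resolves through __offset_*(); mdp5_kms_init() below is
 * reworked accordingly, selecting the config before it pokes the INTF
 * timing-engine registers.
 */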
+
+static int mdp5_hw_init(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct drm_device *dev = mdp5_kms->dev;
+ int i;
+
+ pm_runtime_get_sync(dev->dev);
+
/* Magic unknown register writes:
*
* W VBIF:0x004 00000001 (mdss_mdp.c:839)
@@ -78,15 +191,13 @@ static int mdp5_hw_init(struct msm_kms *kms)
*/
mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(2), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0);
-out:
+ for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++)
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
+
pm_runtime_put_sync(dev->dev);
- return ret;
+ return 0;
}
static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
@@ -161,7 +272,7 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
static const enum mdp5_pipe crtcs[] = {
- SSPP_RGB0, SSPP_RGB1, SSPP_RGB2,
+ SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
};
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
@@ -169,7 +280,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
int i, ret;
/* construct CRTCs: */
- for (i = 0; i < ARRAY_SIZE(crtcs); i++) {
+ for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) {
struct drm_plane *plane;
struct drm_crtc *crtc;
@@ -246,7 +357,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
struct mdp5_kms *mdp5_kms;
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
- int ret;
+ int i, ret;
mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
if (!mdp5_kms) {
@@ -307,20 +418,22 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
+ ret = mdp5_select_hw_cfg(kms);
+ if (ret)
+ goto fail;
+
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0);
+ for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++)
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
mdp5_disable(mdp5_kms);
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(dev, config->iommu);
+ mmu = msm_iommu_new(&pdev->dev, config->iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(dev->dev, "failed to init iommu: %d\n", ret);
@@ -368,5 +481,11 @@ static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
#ifdef CONFIG_OF
/* TODO */
#endif
+ config.iommu = iommu_domain_alloc(&platform_bus_type);
+ /* TODO hard-coded in downstream mdss, but should it be? */
+ config.max_clk = 200000000;
+ /* TODO get from DT: */
+ config.smp_blk_cnt = 22;
+
return &config;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 6e981b692d1d..5bf340dd0f00 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -21,6 +21,24 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "mdp/mdp_kms.h"
+/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */
+#define MDP5_MAX_BASES 8
+struct mdp5_sub_block {
+ int count;
+ uint32_t base[MDP5_MAX_BASES];
+};
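/* Editor's note: MDP5_MAX_BASES (8) must cover the largest per-block
 * instance count; the six layer-mixers of apq8084 are the current
 * maximum in this patch. */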
+struct mdp5_config {
+	const char *name;
+ struct mdp5_sub_block ctl;
+ struct mdp5_sub_block pipe_vig;
+ struct mdp5_sub_block pipe_rgb;
+ struct mdp5_sub_block pipe_dma;
+ struct mdp5_sub_block lm;
+ struct mdp5_sub_block dspp;
+ struct mdp5_sub_block ad;
+ struct mdp5_sub_block intf;
+};
+extern const struct mdp5_config *mdp5_cfg;
#include "mdp5.xml.h"
#include "mdp5_smp.h"
@@ -30,6 +48,7 @@ struct mdp5_kms {
struct drm_device *dev;
int rev;
+ const struct mdp5_config *hw_cfg;
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
@@ -82,6 +101,7 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
NAME(VIG0), NAME(VIG1), NAME(VIG2),
NAME(RGB0), NAME(RGB1), NAME(RGB2),
NAME(DMA0), NAME(DMA1),
+ NAME(VIG3), NAME(RGB3),
#undef NAME
};
return names[pipe];
@@ -98,6 +118,8 @@ static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+ case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+ case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
default: return 0;
}
}
@@ -108,6 +130,7 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
case SSPP_RGB0:
case SSPP_RGB1:
case SSPP_RGB2:
+ case SSPP_RGB3:
return 1;
default:
return 3;
@@ -126,6 +149,8 @@ static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
case SSPP_RGB2: return CID_RGB2;
case SSPP_DMA0: return CID_DMA0_Y + plane;
case SSPP_DMA1: return CID_DMA1_Y + plane;
+ case SSPP_VIG3: return CID_VIG3_Y + plane;
+ case SSPP_RGB3: return CID_RGB3;
default: return CID_UNUSED;
}
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index a9629b85b983..64c1afd6030a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <[email protected]> (robclark)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9a5d87db5c23..b447c01ad89c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -181,7 +181,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
struct msm_kms *kms;
int ret;
-
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(dev->dev, "failed to allocate private data\n");
@@ -314,13 +313,15 @@ fail:
static void load_gpu(struct drm_device *dev)
{
+ static DEFINE_MUTEX(init_lock);
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu;
+ mutex_lock(&init_lock);
+
if (priv->gpu)
- return;
+ goto out;
- mutex_lock(&dev->struct_mutex);
gpu = a3xx_gpu_init(dev);
if (IS_ERR(gpu)) {
dev_warn(dev->dev, "failed to load a3xx gpu\n");
@@ -330,7 +331,9 @@ static void load_gpu(struct drm_device *dev)
if (gpu) {
int ret;
+ mutex_lock(&dev->struct_mutex);
gpu->funcs->pm_resume(gpu);
+ mutex_unlock(&dev->struct_mutex);
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
@@ -340,12 +343,12 @@ static void load_gpu(struct drm_device *dev)
/* give inactive pm a chance to kick in: */
msm_gpu_retire(gpu);
}
-
}
priv->gpu = gpu;
- mutex_unlock(&dev->struct_mutex);
+out:
+ mutex_unlock(&init_lock);
}
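/*
 * Editor's note on the locking change above: a function-local
 * DEFINE_MUTEX(init_lock) now serializes concurrent first-open paths so
 * only one caller initializes priv->gpu, while dev->struct_mutex is
 * held only around pm_resume() instead of across the whole init.
 */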
static int msm_open(struct drm_device *dev, struct drm_file *file)
@@ -906,25 +909,22 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
-static int msm_drm_add_components(struct device *master, struct master *m)
+static int add_components(struct device *dev, struct component_match **matchptr,
+ const char *name)
{
- struct device_node *np = master->of_node;
+ struct device_node *np = dev->of_node;
unsigned i;
- int ret;
for (i = 0; ; i++) {
struct device_node *node;
- node = of_parse_phandle(np, "connectors", i);
+ node = of_parse_phandle(np, name, i);
if (!node)
break;
- ret = component_master_add_child(m, compare_of, node);
- of_node_put(node);
-
- if (ret)
- return ret;
+ component_match_add(dev, matchptr, compare_of, node);
}
+
return 0;
}
#else
@@ -932,9 +932,34 @@ static int compare_dev(struct device *dev, void *data)
{
return dev == data;
}
+#endif
+
+static int msm_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&msm_driver, to_platform_device(dev));
+}
+
+static void msm_drm_unbind(struct device *dev)
+{
+ drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+}
+
+static const struct component_master_ops msm_drm_ops = {
+ .bind = msm_drm_bind,
+ .unbind = msm_drm_unbind,
+};
+
+/*
+ * Platform driver:
+ */
-static int msm_drm_add_components(struct device *master, struct master *m)
+static int msm_pdev_probe(struct platform_device *pdev)
{
+ struct component_match *match = NULL;
+#ifdef CONFIG_OF
+ add_components(&pdev->dev, &match, "connectors");
+ add_components(&pdev->dev, &match, "gpus");
+#else
/* For non-DT case, it kinda sucks. We don't actually have a way
* to know whether or not we are waiting for certain devices (or if
* they are simply not present). But for non-DT we only need to
@@ -958,41 +983,12 @@ static int msm_drm_add_components(struct device *master, struct master *m)
return -EPROBE_DEFER;
}
- ret = component_master_add_child(m, compare_dev, dev);
- if (ret) {
- DBG("could not add child: %d", ret);
- return ret;
- }
+ component_match_add(&pdev->dev, &match, compare_dev, dev);
}
-
- return 0;
-}
#endif
-static int msm_drm_bind(struct device *dev)
-{
- return drm_platform_init(&msm_driver, to_platform_device(dev));
-}
-
-static void msm_drm_unbind(struct device *dev)
-{
- drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
-}
-
-static const struct component_master_ops msm_drm_ops = {
- .add_components = msm_drm_add_components,
- .bind = msm_drm_bind,
- .unbind = msm_drm_unbind,
-};
-
-/*
- * Platform driver:
- */
-
-static int msm_pdev_probe(struct platform_device *pdev)
-{
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- return component_master_add(&pdev->dev, &msm_drm_ops);
+ return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}
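/*
 * Editor's note (sketch of the new componentized probe flow; names are
 * those used above):
 *
 *   struct component_match *match = NULL;
 *
 *   component_match_add(&pdev->dev, &match, compare_of, node);
 *   return component_master_add_with_match(&pdev->dev, &msm_drm_ops,
 *                                          match);
 *
 * i.e. the match list is built up front and the old add_components
 * master callback disappears.
 */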
static int msm_pdev_remove(struct platform_device *pdev)
@@ -1008,7 +1004,8 @@ static const struct platform_device_id msm_id[] = {
};
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdss_mdp" },
+ { .compatible = "qcom,mdp" }, /* mdp4 */
+ { .compatible = "qcom,mdss_mdp" }, /* mdp5 */
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c437065933e3..9c5221ce391a 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -19,6 +19,11 @@
#include "drm_crtc.h"
#include "drm_fb_helper.h"
+#include "msm_gem.h"
+
+extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
/*
* fbdev funcs, to implement legacy fbdev interface on top of drm driver
@@ -43,6 +48,7 @@ static struct fb_ops msm_fb_ops = {
.fb_fillrect = sys_fillrect,
.fb_copyarea = sys_copyarea,
.fb_imageblit = sys_imageblit,
+ .fb_mmap = msm_fbdev_mmap,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
@@ -51,6 +57,31 @@ static struct fb_ops msm_fb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
+static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
+ struct msm_fbdev *fbdev = to_msm_fbdev(helper);
+ struct drm_gem_object *drm_obj = fbdev->bo;
+ struct drm_device *dev = helper->dev;
+ int ret = 0;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret) {
+ pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
+ return ret;
+ }
+
+ return msm_gem_mmap_obj(drm_obj, vma);
+}
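/*
 * Editor's note: the fbdev mmap above first sets up the VMA via the DRM
 * core (drm_gem_mmap_obj) and then hands off to msm_gem_mmap_obj() to
 * apply the msm-specific VMA setup; struct_mutex is held only around
 * the core call.
 */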
+
static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -104,8 +135,16 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
mutex_lock(&dev->struct_mutex);
- /* TODO implement our own fb_mmap so we don't need this: */
- msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
+ /*
+	 * NOTE: if we could be guaranteed to be able to map the buffer
+	 * in panic context (i.e. lock-safe, etc.) we could avoid pinning the
+ * buffer now:
+ */
+ ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
+ if (ret) {
+ dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
+ goto fail;
+ }
fbi = framebuffer_alloc(0, dev->dev);
if (!fbi) {
@@ -189,7 +228,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct msm_fbdev *fbdev = NULL;
struct drm_fb_helper *helper;
- int ret = 0;
+ int ret;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 713722b0ba78..4b1b82adabde 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -278,24 +278,23 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint32_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct drm_device *dev = obj->dev;
int ret = 0;
if (!msm_obj->domain[id].iova) {
struct msm_drm_private *priv = obj->dev->dev_private;
- struct msm_mmu *mmu = priv->mmus[id];
struct page **pages = get_pages(obj);
- if (!mmu) {
- dev_err(dev->dev, "null MMU pointer\n");
- return -EINVAL;
- }
-
if (IS_ERR(pages))
return PTR_ERR(pages);
if (iommu_present(&platform_bus_type)) {
- uint32_t offset = (uint32_t)mmap_offset(obj);
+ struct msm_mmu *mmu = priv->mmus[id];
+ uint32_t offset;
+
+ if (WARN_ON(!mmu))
+ return -EINVAL;
+
+ offset = (uint32_t)mmap_offset(obj);
ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
obj->size, IOMMU_READ | IOMMU_WRITE);
msm_obj->domain[id].iova = offset;
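/*
 * Editor's note: the NULL-mmu check is now confined to the
 * iommu_present() branch; on systems without an IOMMU (VRAM carveout)
 * no per-id MMU exists, so the old unconditional -EINVAL would have
 * failed a perfectly valid path.
 */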
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c6322197db8c..4a0dce587745 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -606,14 +606,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
iommu = iommu_domain_alloc(&platform_bus_type);
if (iommu) {
dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->mmu = msm_iommu_new(drm, iommu);
+ gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
gpu->id = msm_register_mmu(drm, gpu->mmu);
+
/* Create ringbuffer: */
+ mutex_lock(&drm->struct_mutex);
gpu->rb = msm_ringbuffer_new(gpu, ringsz);
+ mutex_unlock(&drm->struct_mutex);
if (IS_ERR(gpu->rb)) {
ret = PTR_ERR(gpu->rb);
gpu->rb = NULL;
@@ -621,13 +624,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
- if (ret) {
- gpu->rb_iova = 0;
- dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
- goto fail;
- }
-
bs_init(gpu);
return 0;
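/*
 * Editor's note: msm_ringbuffer_new() allocates a GEM object, which now
 * expects struct_mutex to be held, hence the lock/unlock pair above;
 * the ringbuffer iova that used to be pinned here is presumably mapped
 * later, once the GPU is actually brought up.
 */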
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 4b2ad9181edf..099af483fdf0 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -33,39 +33,14 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
- struct drm_device *dev = mmu->dev;
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int i, ret;
-
- for (i = 0; i < cnt; i++) {
- struct device *msm_iommu_get_ctx(const char *ctx_name);
- struct device *ctx = msm_iommu_get_ctx(names[i]);
- if (IS_ERR_OR_NULL(ctx)) {
- dev_warn(dev->dev, "couldn't get %s context", names[i]);
- continue;
- }
- ret = iommu_attach_device(iommu->domain, ctx);
- if (ret) {
- dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
- return ret;
- }
- }
-
- return 0;
+ return iommu_attach_device(iommu->domain, mmu->dev);
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int i;
-
- for (i = 0; i < cnt; i++) {
- struct device *msm_iommu_get_ctx(const char *ctx_name);
- struct device *ctx = msm_iommu_get_ctx(names[i]);
- if (IS_ERR_OR_NULL(ctx))
- continue;
- iommu_detach_device(iommu->domain, ctx);
- }
+ iommu_detach_device(iommu->domain, mmu->dev);
}
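/*
 * Editor's note: with mmu->dev now a plain struct device, attach/detach
 * collapse to a single iommu_attach_device()/iommu_detach_device() on
 * that device, and the per-context lookup via msm_iommu_get_ctx() goes
 * away entirely.
 */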
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
@@ -149,7 +124,7 @@ static const struct msm_mmu_funcs funcs = {
.destroy = msm_iommu_destroy,
};
-struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
struct msm_iommu *iommu;
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 21da6d154f71..7cd88d9dc155 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -32,17 +32,17 @@ struct msm_mmu_funcs {
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
- struct drm_device *dev;
+ struct device *dev;
};
-static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev,
+static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
const struct msm_mmu_funcs *funcs)
{
mmu->dev = dev;
mmu->funcs = funcs;
}
-struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu);
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 26e962b7e702..2283c442a10d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1516,11 +1516,11 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
}
switch ((ctrl & 0x000f0000) >> 16) {
- case 6: datarate = pclk * 30 / 8; break;
- case 5: datarate = pclk * 24 / 8; break;
+ case 6: datarate = pclk * 30; break;
+ case 5: datarate = pclk * 24; break;
case 2:
default:
- datarate = pclk * 18 / 8;
+ datarate = pclk * 18;
break;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 48aa38a87e3f..fa30d8196f35 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -1159,11 +1159,11 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
if (outp->info.type == DCB_OUTPUT_DP) {
u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
switch ((sync & 0x000003c0) >> 6) {
- case 6: pclk = pclk * 30 / 8; break;
- case 5: pclk = pclk * 24 / 8; break;
+ case 6: pclk = pclk * 30; break;
+ case 5: pclk = pclk * 24; break;
case 2:
default:
- pclk = pclk * 18 / 8;
+ pclk = pclk * 18;
break;
}
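/*
 * Editor's note: in both hunks above the data rate is now carried in
 * bits (pclk * bpp) rather than bytes (pclk * bpp / 8); the link-rate
 * comparison in outpdp.c below is adjusted to match, so dividing by 8
 * here as well would have double-counted the conversion.
 */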
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
index 52c299c3d300..eb2d7789555d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
@@ -34,7 +34,7 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
struct nvkm_output_dp *outp = (void *)base;
bool retrain = true;
u8 link[2], stat[3];
- u32 rate;
+ u32 linkrate;
int ret, i;
/* check that the link is trained at a high enough rate */
@@ -44,8 +44,10 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
goto done;
}
- rate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
- if (rate < ((datarate / 8) * 10)) {
+ linkrate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
+ linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */
+ datarate = (datarate + 9) / 10; /* -> decakilobits */
+ if (linkrate < datarate) {
DBG("link not trained at sufficient rate\n");
goto done;
}
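/*
 * Editor's note, a worked example for the check above (assuming the
 * incoming datarate is in kb/s, as the rounding to decakilobit units
 * suggests): two lanes trained at 2.7 Gb/s give link[0] = 0x0a, so
 *
 *   linkrate = 10 * 27000 * 2    = 540000  (units of 10 kb/s)
 *   linkrate = (540000 * 8) / 10 = 432000  after 8B/10B overhead
 *
 * and a mode needing 3,564,000 kb/s rounds up to 356400, which passes.
 */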
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index e1832778e8b6..7a1ebdfa9e1b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -87,6 +87,7 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
struct nvkm_output_dp *outpdp = (void *)outp;
switch (data) {
case NV94_DISP_SOR_DP_PWR_STATE_OFF:
+ nouveau_event_put(outpdp->irq);
((struct nvkm_output_dp_impl *)nv_oclass(outp))
->lnk_pwr(outpdp, 0);
atomic_set(&outpdp->lt.done, 0);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
index 0f57fcfe0bbf..2af9cfd2c60f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -26,7 +26,7 @@ ramfuc_reg2(u32 addr1, u32 addr2)
};
}
-static inline struct ramfuc_reg
+static noinline struct ramfuc_reg
ramfuc_reg(u32 addr)
{
return ramfuc_reg2(addr, addr);
@@ -107,7 +107,7 @@ ramfuc_nsec(struct ramfuc *ram, u32 nsec)
#define ram_init(s,p) ramfuc_init(&(s)->base, (p))
#define ram_exec(s,e) ramfuc_exec(&(s)->base, (e))
-#define ram_have(s,r) ((s)->r_##r.addr != 0x000000)
+#define ram_have(s,r) ((s)->r_##r.addr[0] != 0x000000)
#define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r)
#define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
#define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index 1ad3ea503133..c5b46e302319 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -200,6 +200,7 @@ r1373f4_init(struct nve0_ramfuc *fuc)
/* (re)program mempll, if required */
if (ram->mode == 2) {
ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+ ram_mask(fuc, 0x132000, 0x80000000, 0x80000000);
ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index cfde9eb44ad0..6212537b90c5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -192,11 +192,11 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
NOUVEAU_THERM_THRS_SHUTDOWN);
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
/* schedule the next poll in one second */
if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
- ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
-
- spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+ ptimer->alarm(ptimer, 1000000000ULL, alarm);
}
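/*
 * Editor's note: re-arming the alarm is moved outside
 * alarm_program_lock so ptimer->alarm() no longer runs under the
 * spinlock (the alarm path can itself take that lock); 1000000000ULL
 * also spells out the one-second interval's 64-bit type.
 */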
void
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b6dc85c614be..ba29a701ca1d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -309,7 +309,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
- ret = ttm_bo_reserve(bo, false, false, false, 0);
+ ret = ttm_bo_reserve(bo, false, false, false, NULL);
if (ret)
goto out;
@@ -350,7 +350,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret, ref;
- ret = ttm_bo_reserve(bo, false, false, false, 0);
+ ret = ttm_bo_reserve(bo, false, false, false, NULL);
if (ret)
return ret;
@@ -385,7 +385,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
{
int ret;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 680f46d007a0..dbdc9ad59546 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -63,7 +63,7 @@ find_encoder(struct drm_connector *connector, int type)
{
struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder;
- struct drm_mode_object *obj;
+ struct drm_encoder *enc;
int i, id;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
@@ -71,10 +71,10 @@ find_encoder(struct drm_connector *connector, int type)
if (!id)
break;
- obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ enc = drm_encoder_find(dev, id);
+ if (!enc)
continue;
- nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+ nv_encoder = nouveau_encoder(enc);
if (type == DCB_OUTPUT_ANY ||
(nv_encoder->dcb && nv_encoder->dcb->type == type))
@@ -119,7 +119,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
struct nouveau_encoder *nv_encoder;
- struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
int i, panel = -ENODEV;
/* eDP panels need powering on by us (if the VBIOS doesn't default it
@@ -139,10 +139,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
if (id == 0)
break;
- obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(dev, id);
+ if (!encoder)
continue;
- nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+ nv_encoder = nouveau_encoder(encoder);
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
int ret = nouveau_dp_detect(nv_encoder);
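
drm_encoder_find() gives the connector code a typed lookup, folding the raw mode-object search plus the obj_to_encoder() cast into one helper. A user-space model of that shape, with stub types standing in for the DRM ones:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stub types for the model; the real ones live in the DRM core. */
struct drm_mode_object { uint32_t id; };
struct drm_encoder { struct drm_mode_object base; };

static struct drm_encoder the_encoder = { { 42 } };

static struct drm_mode_object *mode_object_find(uint32_t id)
{
	return id == the_encoder.base.id ? &the_encoder.base : NULL;
}

/* The helper's shape: typed lookup plus container cast in one place. */
static struct drm_encoder *encoder_find(uint32_t id)
{
	struct drm_mode_object *obj = mode_object_find(id);

	return obj ? (struct drm_encoder *)obj : NULL;
}

int main(void)
{
	printf("id 42: %s, id 7: %s\n",
	       encoder_find(42) ? "found" : "missing",
	       encoder_find(7) ? "found" : "missing");
	return 0;
}
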
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ddd83756b9a2..5425ffe3931d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -652,12 +652,12 @@ int nouveau_pmops_resume(struct device *dev)
ret = nouveau_do_resume(drm_dev);
if (ret)
return ret;
- if (drm_dev->mode_config.num_crtc)
- nouveau_fbcon_set_suspend(drm_dev, 0);
- nouveau_fbcon_zfill_all(drm_dev);
- if (drm_dev->mode_config.num_crtc)
+ if (drm_dev->mode_config.num_crtc) {
nouveau_display_resume(drm_dev);
+ nouveau_fbcon_set_suspend(drm_dev, 0);
+ }
+
return 0;
}
@@ -683,11 +683,12 @@ static int nouveau_pmops_thaw(struct device *dev)
ret = nouveau_do_resume(drm_dev);
if (ret)
return ret;
- if (drm_dev->mode_config.num_crtc)
- nouveau_fbcon_set_suspend(drm_dev, 0);
- nouveau_fbcon_zfill_all(drm_dev);
- if (drm_dev->mode_config.num_crtc)
+
+ if (drm_dev->mode_config.num_crtc) {
nouveau_display_resume(drm_dev);
+ nouveau_fbcon_set_suspend(drm_dev, 0);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index afe706a20f97..758c11cb9a9a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -532,17 +532,10 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
if (state == 1)
nouveau_fbcon_save_disable_accel(dev);
fb_set_suspend(drm->fbcon->helper.fbdev, state);
- if (state == 0)
+ if (state == 0) {
nouveau_fbcon_restore_accel(dev);
+ nouveau_fbcon_zfill(dev, drm->fbcon);
+ }
console_unlock();
}
}
-
-void
-nouveau_fbcon_zfill_all(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- nouveau_fbcon_zfill(dev, drm->fbcon);
- }
-}
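
With zfill moved inside nouveau_fbcon_set_suspend(), the resume paths in nouveau_drm.c above only restore the display and then unsuspend the fbcon; the separate zfill_all() pass (whose declaration is dropped from the header below) is no longer needed. A toy model of folding the repaint into the unsuspend path:

#include <stdbool.h>
#include <stdio.h>

static void restore_accel(void) { puts("accel restored"); }
static void zfill(void)         { puts("fbcon cleared"); }

static void fbcon_set_suspend(bool suspend)
{
	if (!suspend) {
		restore_accel();
		zfill();	/* formerly a separate zfill_all() pass */
	}
}

int main(void)
{
	fbcon_set_suspend(false);	/* resume: one call does both steps */
	return 0;
}
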
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index fdfc0c94fbcc..fcff797d2084 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -61,7 +61,6 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info);
int nouveau_fbcon_init(struct drm_device *dev);
void nouveau_fbcon_fini(struct drm_device *dev);
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
-void nouveau_fbcon_zfill_all(struct drm_device *dev);
void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
void nouveau_fbcon_restore_accel(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c90c0dc0afe8..df9d451afdcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -61,7 +61,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
if (!cli->base.vm)
return 0;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return ret;
@@ -132,7 +132,7 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
if (!cli->base.vm)
return;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index afdf607df3e6..4c534b7b04da 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1741,7 +1741,8 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
}
- mthd = (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
+ mthd = (ffs(nv_encoder->dcb->heads) - 1) << 3;
+ mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
mthd |= nv_encoder->or;
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f926b4caf449..56c60552abba 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -199,7 +199,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
struct page **pages, uint32_t npages, uint32_t roll)
{
- dma_addr_t pat_pa = 0;
+ dma_addr_t pat_pa = 0, data_pa = 0;
uint32_t *data;
struct pat *pat;
struct refill_engine *engine = txn->engine_handle;
@@ -223,7 +223,9 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
.lut_id = engine->tcm->lut_id,
};
- data = alloc_dma(txn, 4*i, &pat->data_pa);
+ data = alloc_dma(txn, 4*i, &data_pa);
+ /* FIXME: what if data_pa is wider than 32 bits? */
+ pat->data_pa = data_pa;
while (i--) {
int n = i + roll;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 284b80fc3c54..b08a450d1b5d 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -119,13 +119,6 @@ struct omap_drm_private {
struct omap_drm_irq error_handler;
};
-/* this should probably be in drm-core to standardize amongst drivers */
-#define DRM_ROTATE_0 0
-#define DRM_ROTATE_90 1
-#define DRM_ROTATE_180 2
-#define DRM_ROTATE_270 3
-#define DRM_REFLECT_X 4
-#define DRM_REFLECT_Y 5
#ifdef CONFIG_DEBUG_FS
int omap_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 5c3670017a4a..e4849413ee80 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -787,7 +787,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
omap_obj->paddr = tiler_ssptr(block);
omap_obj->block = block;
- DBG("got paddr: %08x", omap_obj->paddr);
+ DBG("got paddr: %pad", &omap_obj->paddr);
}
omap_obj->paddr_cnt++;
@@ -981,9 +981,9 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
off = drm_vma_node_start(&obj->vma_node);
- seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
+ seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, obj->refcount.refcount.counter,
- off, omap_obj->paddr, omap_obj->paddr_cnt,
+ off, &omap_obj->paddr, omap_obj->paddr_cnt,
omap_obj->vaddr, omap_obj->roll);
if (omap_obj->flags & OMAP_BO_TILED) {
@@ -1464,8 +1464,8 @@ void omap_gem_init(struct drm_device *dev)
entry->paddr = tiler_ssptr(block);
entry->block = block;
- DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
- entry->paddr,
+ DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
+ &entry->paddr,
usergart[i].stride_pfn << PAGE_SHIFT);
}
}
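
%pad is the printk extension for dma_addr_t; it consumes a pointer to the value, which is why every converted call site above also gains an '&'. Unlike %08x, it prints the full address whether dma_addr_t is 32 or 64 bits wide. A kernel-style sketch (not standalone) using names from this patch:

dma_addr_t paddr = omap_obj->paddr;

DBG("got paddr: %pad", &paddr);		/* full width on 32- and 64-bit */
/* The old form, DBG("got paddr: %08x", paddr), silently truncates
 * (or warns) when dma_addr_t is 64-bit. */
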
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 3cf31ee59aac..891a4dc608af 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -142,8 +142,8 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
info->out_width, info->out_height,
info->screen_width);
- DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
- info->paddr, info->p_uv_addr);
+ DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
+ &info->paddr, &info->p_uv_addr);
/* TODO: */
ilace = false;
@@ -308,16 +308,13 @@ void omap_plane_install_properties(struct drm_plane *plane,
if (priv->has_dmm) {
prop = priv->rotation_prop;
if (!prop) {
- const struct drm_prop_enum_list props[] = {
- { DRM_ROTATE_0, "rotate-0" },
- { DRM_ROTATE_90, "rotate-90" },
- { DRM_ROTATE_180, "rotate-180" },
- { DRM_ROTATE_270, "rotate-270" },
- { DRM_REFLECT_X, "reflect-x" },
- { DRM_REFLECT_Y, "reflect-y" },
- };
- prop = drm_property_create_bitmask(dev, 0, "rotation",
- props, ARRAY_SIZE(props));
+ prop = drm_mode_create_rotation_property(dev,
+ BIT(DRM_ROTATE_0) |
+ BIT(DRM_ROTATE_90) |
+ BIT(DRM_ROTATE_180) |
+ BIT(DRM_ROTATE_270) |
+ BIT(DRM_REFLECT_X) |
+ BIT(DRM_REFLECT_Y));
if (prop == NULL)
return;
priv->rotation_prop = prop;
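
drm_mode_create_rotation_property() moves the property into the DRM core, which is why the driver-local DRM_ROTATE_*/DRM_REFLECT_* defines could be deleted from omap_drv.h earlier in this patch; the supported transforms are passed as a bitmask. A user-space sketch of the mask construction, with assumed bit positions:

#include <stdio.h>

enum { ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270, REFLECT_X, REFLECT_Y };
#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int supported = BIT(ROTATE_0) | BIT(ROTATE_90) |
				 BIT(ROTATE_180) | BIT(ROTATE_270) |
				 BIT(REFLECT_X) | BIT(REFLECT_Y);

	printf("rotation mask: %#x\n", supported);	/* 0x3f */
	return 0;
}
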
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 4ec874da5668..bee9f72b3a93 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -5,7 +5,7 @@ config DRM_PANEL
Panel registration and lookup framework.
menu "Display Panels"
- depends on DRM_PANEL
+ depends on DRM && DRM_PANEL
config DRM_PANEL_SIMPLE
tristate "support for simple panels"
@@ -18,14 +18,11 @@ config DRM_PANEL_SIMPLE
config DRM_PANEL_LD9040
tristate "LD9040 RGB/SPI panel"
- depends on DRM && DRM_PANEL
- depends on OF
- select SPI
+ depends on OF && SPI
select VIDEOMODE_HELPERS
config DRM_PANEL_S6E8AA0
tristate "S6E8AA0 DSI video mode panel"
- depends on DRM && DRM_PANEL
depends on OF
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
index db1601fdbe29..42ac67b21e9f 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-ld9040.c
@@ -110,7 +110,10 @@ struct ld9040 {
int error;
};
-#define panel_to_ld9040(p) container_of(p, struct ld9040, panel)
+static inline struct ld9040 *panel_to_ld9040(struct drm_panel *panel)
+{
+ return container_of(panel, struct ld9040, panel);
+}
static int ld9040_clear_error(struct ld9040 *ctx)
{
@@ -216,6 +219,11 @@ static int ld9040_power_off(struct ld9040 *ctx)
static int ld9040_disable(struct drm_panel *panel)
{
+ return 0;
+}
+
+static int ld9040_unprepare(struct drm_panel *panel)
+{
struct ld9040 *ctx = panel_to_ld9040(panel);
msleep(120);
@@ -228,7 +236,7 @@ static int ld9040_disable(struct drm_panel *panel)
return ld9040_power_off(ctx);
}
-static int ld9040_enable(struct drm_panel *panel)
+static int ld9040_prepare(struct drm_panel *panel)
{
struct ld9040 *ctx = panel_to_ld9040(panel);
int ret;
@@ -242,11 +250,16 @@ static int ld9040_enable(struct drm_panel *panel)
ret = ld9040_clear_error(ctx);
if (ret < 0)
- ld9040_disable(panel);
+ ld9040_unprepare(panel);
return ret;
}
+static int ld9040_enable(struct drm_panel *panel)
+{
+ return 0;
+}
+
static int ld9040_get_modes(struct drm_panel *panel)
{
struct drm_connector *connector = panel->connector;
@@ -273,6 +286,8 @@ static int ld9040_get_modes(struct drm_panel *panel)
static const struct drm_panel_funcs ld9040_drm_funcs = {
.disable = ld9040_disable,
+ .unprepare = ld9040_unprepare,
+ .prepare = ld9040_prepare,
.enable = ld9040_enable,
.get_modes = ld9040_get_modes,
};
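
This conversion splits the old enable/disable pair into the four-stage drm_panel lifecycle: prepare (power rails and init sequence) and unprepare take over the heavy lifting, while enable/disable become no-ops here. The s6e8aa0 and panel-simple conversions below follow the same pattern. A user-space model of the call order the DRM core follows:

#include <stdio.h>

struct panel_funcs {
	void (*prepare)(void);		/* power rails up, init sequence */
	void (*enable)(void);		/* display may show the first frame */
	void (*disable)(void);		/* blank the display */
	void (*unprepare)(void);	/* power rails down */
};

static void prepare(void)   { puts("prepare"); }
static void enable(void)    { puts("enable"); }
static void disable(void)   { puts("disable"); }
static void unprepare(void) { puts("unprepare"); }

int main(void)
{
	struct panel_funcs f = { prepare, enable, disable, unprepare };

	f.prepare(); f.enable();	/* pipeline comes up */
	f.disable(); f.unprepare();	/* pipeline goes down */
	return 0;
}
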
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
index 06e57a26db7a..b5217fe37f02 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c
@@ -120,7 +120,10 @@ struct s6e8aa0 {
int error;
};
-#define panel_to_s6e8aa0(p) container_of(p, struct s6e8aa0, panel)
+static inline struct s6e8aa0 *panel_to_s6e8aa0(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e8aa0, panel);
+}
static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
{
@@ -133,14 +136,14 @@ static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
+ ssize_t ret;
if (ctx->error < 0)
return;
- ret = mipi_dsi_dcs_write(dsi, dsi->channel, data, len);
+ ret = mipi_dsi_dcs_write(dsi, data, len);
if (ret < 0) {
- dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len,
+ dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret, len,
data);
ctx->error = ret;
}
@@ -154,7 +157,7 @@ static int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len)
if (ctx->error < 0)
return ctx->error;
- ret = mipi_dsi_dcs_read(dsi, dsi->channel, cmd, data, len);
+ ret = mipi_dsi_dcs_read(dsi, cmd, data, len);
if (ret < 0) {
dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd);
ctx->error = ret;
@@ -889,6 +892,11 @@ static int s6e8aa0_power_off(struct s6e8aa0 *ctx)
static int s6e8aa0_disable(struct drm_panel *panel)
{
+ return 0;
+}
+
+static int s6e8aa0_unprepare(struct drm_panel *panel)
+{
struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
@@ -900,7 +908,7 @@ static int s6e8aa0_disable(struct drm_panel *panel)
return s6e8aa0_power_off(ctx);
}
-static int s6e8aa0_enable(struct drm_panel *panel)
+static int s6e8aa0_prepare(struct drm_panel *panel)
{
struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
int ret;
@@ -913,11 +921,16 @@ static int s6e8aa0_enable(struct drm_panel *panel)
ret = ctx->error;
if (ret < 0)
- s6e8aa0_disable(panel);
+ s6e8aa0_unprepare(panel);
return ret;
}
+static int s6e8aa0_enable(struct drm_panel *panel)
+{
+ return 0;
+}
+
static int s6e8aa0_get_modes(struct drm_panel *panel)
{
struct drm_connector *connector = panel->connector;
@@ -944,6 +957,8 @@ static int s6e8aa0_get_modes(struct drm_panel *panel)
static const struct drm_panel_funcs s6e8aa0_drm_funcs = {
.disable = s6e8aa0_disable,
+ .unprepare = s6e8aa0_unprepare,
+ .prepare = s6e8aa0_prepare,
.enable = s6e8aa0_enable,
.get_modes = s6e8aa0_get_modes,
};
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index a25136132c31..4ce1db0a68ff 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -37,14 +37,35 @@ struct panel_desc {
const struct drm_display_mode *modes;
unsigned int num_modes;
+ unsigned int bpc;
+
struct {
unsigned int width;
unsigned int height;
} size;
+
+ /**
+ * @prepare: the time (in milliseconds) that it takes for the panel to
+ * become ready and start receiving video data
+ * @enable: the time (in milliseconds) that it takes for the panel to
+ * display the first valid frame after starting to receive
+ * video data
+ * @disable: the time (in milliseconds) that it takes for the panel to
+ * turn the display off (no content is visible)
+ * @unprepare: the time (in milliseconds) that it takes for the panel
+ * to power itself down completely
+ */
+ struct {
+ unsigned int prepare;
+ unsigned int enable;
+ unsigned int disable;
+ unsigned int unprepare;
+ } delay;
};
struct panel_simple {
struct drm_panel base;
+ bool prepared;
bool enabled;
const struct panel_desc *desc;
@@ -87,6 +108,7 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
num++;
}
+ connector->display_info.bpc = panel->desc->bpc;
connector->display_info.width_mm = panel->desc->size.width;
connector->display_info.height_mm = panel->desc->size.height;
@@ -105,21 +127,40 @@ static int panel_simple_disable(struct drm_panel *panel)
backlight_update_status(p->backlight);
}
+ if (p->desc->delay.disable)
+ msleep(p->desc->delay.disable);
+
+ p->enabled = false;
+
+ return 0;
+}
+
+static int panel_simple_unprepare(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+
+ if (!p->prepared)
+ return 0;
+
if (p->enable_gpio)
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
- p->enabled = false;
+
+ if (p->desc->delay.unprepare)
+ msleep(p->desc->delay.unprepare);
+
+ p->prepared = false;
return 0;
}
-static int panel_simple_enable(struct drm_panel *panel)
+static int panel_simple_prepare(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
int err;
- if (p->enabled)
+ if (p->prepared)
return 0;
err = regulator_enable(p->supply);
@@ -131,6 +172,24 @@ static int panel_simple_enable(struct drm_panel *panel)
if (p->enable_gpio)
gpiod_set_value_cansleep(p->enable_gpio, 1);
+ if (p->desc->delay.prepare)
+ msleep(p->desc->delay.prepare);
+
+ p->prepared = true;
+
+ return 0;
+}
+
+static int panel_simple_enable(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+
+ if (p->enabled)
+ return 0;
+
+ if (p->desc->delay.enable)
+ msleep(p->desc->delay.enable);
+
if (p->backlight) {
p->backlight->props.power = FB_BLANK_UNBLANK;
backlight_update_status(p->backlight);
@@ -164,6 +223,8 @@ static int panel_simple_get_modes(struct drm_panel *panel)
static const struct drm_panel_funcs panel_simple_funcs = {
.disable = panel_simple_disable,
+ .unprepare = panel_simple_unprepare,
+ .prepare = panel_simple_prepare,
.enable = panel_simple_enable,
.get_modes = panel_simple_get_modes,
};
@@ -179,22 +240,21 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return -ENOMEM;
panel->enabled = false;
+ panel->prepared = false;
panel->desc = desc;
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
return PTR_ERR(panel->supply);
- panel->enable_gpio = devm_gpiod_get(dev, "enable");
+ panel->enable_gpio = devm_gpiod_get_optional(dev, "enable");
if (IS_ERR(panel->enable_gpio)) {
err = PTR_ERR(panel->enable_gpio);
- if (err != -ENOENT) {
- dev_err(dev, "failed to request GPIO: %d\n", err);
- return err;
- }
+ dev_err(dev, "failed to request GPIO: %d\n", err);
+ return err;
+ }
- panel->enable_gpio = NULL;
- } else {
+ if (panel->enable_gpio) {
err = gpiod_direction_output(panel->enable_gpio, 0);
if (err < 0) {
dev_err(dev, "failed to setup GPIO: %d\n", err);
@@ -285,6 +345,7 @@ static const struct drm_display_mode auo_b101aw03_mode = {
static const struct panel_desc auo_b101aw03 = {
.modes = &auo_b101aw03_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 223,
.height = 125,
@@ -307,12 +368,40 @@ static const struct drm_display_mode auo_b133xtn01_mode = {
static const struct panel_desc auo_b133xtn01 = {
.modes = &auo_b133xtn01_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 293,
.height = 165,
},
};
+static const struct drm_display_mode auo_b133htn01_mode = {
+ .clock = 150660,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 172,
+ .hsync_end = 1920 + 172 + 80,
+ .htotal = 1920 + 172 + 80 + 60,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 25,
+ .vsync_end = 1080 + 25 + 10,
+ .vtotal = 1080 + 25 + 10 + 10,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b133htn01 = {
+ .modes = &auo_b133htn01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+ .delay = {
+ .prepare = 105,
+ .enable = 20,
+ .unprepare = 50,
+ },
+};
+
static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
.clock = 72070,
.hdisplay = 1366,
@@ -329,6 +418,7 @@ static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
static const struct panel_desc chunghwa_claa101wa01a = {
.modes = &chunghwa_claa101wa01a_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 220,
.height = 120,
@@ -351,6 +441,7 @@ static const struct drm_display_mode chunghwa_claa101wb01_mode = {
static const struct panel_desc chunghwa_claa101wb01 = {
.modes = &chunghwa_claa101wb01_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 223,
.height = 125,
@@ -374,6 +465,7 @@ static const struct drm_display_mode edt_et057090dhu_mode = {
static const struct panel_desc edt_et057090dhu = {
.modes = &edt_et057090dhu_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 115,
.height = 86,
@@ -397,12 +489,82 @@ static const struct drm_display_mode edt_etm0700g0dh6_mode = {
static const struct panel_desc edt_etm0700g0dh6 = {
.modes = &edt_etm0700g0dh6_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 152,
.height = 91,
},
};
+static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
+ .clock = 32260,
+ .hdisplay = 800,
+ .hsync_start = 800 + 168,
+ .hsync_end = 800 + 168 + 64,
+ .htotal = 800 + 168 + 64 + 88,
+ .vdisplay = 480,
+ .vsync_start = 480 + 37,
+ .vsync_end = 480 + 37 + 2,
+ .vtotal = 480 + 37 + 2 + 8,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc foxlink_fl500wvr00_a0t = {
+ .modes = &foxlink_fl500wvr00_a0t_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 108,
+ .height = 65,
+ },
+};
+
+static const struct drm_display_mode innolux_n116bge_mode = {
+ .clock = 71000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 64,
+ .hsync_end = 1366 + 64 + 6,
+ .htotal = 1366 + 64 + 6 + 64,
+ .vdisplay = 768,
+ .vsync_start = 768 + 8,
+ .vsync_end = 768 + 8 + 4,
+ .vtotal = 768 + 8 + 4 + 8,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc innolux_n116bge = {
+ .modes = &innolux_n116bge_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+};
+
+static const struct drm_display_mode innolux_n156bge_l21_mode = {
+ .clock = 69300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 16,
+ .hsync_end = 1366 + 16 + 34,
+ .htotal = 1366 + 16 + 34 + 50,
+ .vdisplay = 768,
+ .vsync_start = 768 + 2,
+ .vsync_end = 768 + 2 + 6,
+ .vtotal = 768 + 2 + 6 + 12,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc innolux_n156bge_l21 = {
+ .modes = &innolux_n156bge_l21_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 344,
+ .height = 193,
+ },
+};
+
static const struct drm_display_mode lg_lp129qe_mode = {
.clock = 285250,
.hdisplay = 2560,
@@ -419,6 +581,7 @@ static const struct drm_display_mode lg_lp129qe_mode = {
static const struct panel_desc lg_lp129qe = {
.modes = &lg_lp129qe_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 272,
.height = 181,
@@ -441,6 +604,7 @@ static const struct drm_display_mode samsung_ltn101nt05_mode = {
static const struct panel_desc samsung_ltn101nt05 = {
.modes = &samsung_ltn101nt05_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 1024,
.height = 600,
@@ -452,6 +616,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
+ .compatible = "auo,b133htn01",
+ .data = &auo_b133htn01,
+ }, {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
@@ -470,14 +637,21 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,etm0700g0dh6",
.data = &edt_etm0700g0dh6,
}, {
+ .compatible = "foxlink,fl500wvr00-a0t",
+ .data = &foxlink_fl500wvr00_a0t,
+ }, {
+ .compatible = "innolux,n116bge",
+ .data = &innolux_n116bge,
+ }, {
+ .compatible = "innolux,n156bge-l21",
+ .data = &innolux_n156bge_l21,
+ }, {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
- .compatible = "simple-panel",
- }, {
/* sentinel */
}
};
@@ -545,7 +719,7 @@ static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
.height = 151,
},
},
- .flags = MIPI_DSI_MODE_VIDEO,
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
.format = MIPI_DSI_FMT_RGB888,
.lanes = 4,
};
@@ -599,7 +773,8 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.height = 136,
},
},
- .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS,
.format = MIPI_DSI_FMT_RGB888,
.lanes = 4,
};
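
The new delay table lets each panel descriptor state how long the hardware needs per lifecycle stage; every hook msleep()s only when its entry is non-zero, and auo_b133htn01 above is the first user. A user-space model of the zero-means-skip convention:

#include <stdio.h>

struct panel_delay { unsigned int prepare, enable, disable, unprepare; };

static void stage_sleep(const char *stage, unsigned int ms)
{
	if (ms)				/* zero means no wait needed */
		printf("%s: msleep(%u)\n", stage, ms);
}

int main(void)
{
	/* values from the auo_b133htn01 descriptor above */
	struct panel_delay d = { .prepare = 105, .enable = 20, .unprepare = 50 };

	stage_sleep("prepare", d.prepare);
	stage_sleep("enable", d.enable);
	stage_sleep("disable", d.disable);	/* zero: skipped */
	stage_sleep("unprepare", d.unprepare);
	return 0;
}
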
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 34d6a85e9023..0bf1e20c6e44 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
pending = xchg(&qdev->ram_header->int_pending, 0);
+ if (!pending)
+ return IRQ_NONE;
+
atomic_inc(&qdev->irq_received);
if (pending & QXL_INTERRUPT_DISPLAY) {
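
On a shared interrupt line a handler must report IRQ_NONE when its device raised nothing; the early !pending return gives the kernel's spurious-interrupt detection accurate data and stops irq_received from counting other devices' interrupts. A user-space model of the contract:

#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

static enum irqreturn irq_handler(unsigned int pending)
{
	if (!pending)
		return IRQ_NONE;	/* another device's interrupt */

	/* ... acknowledge and process the pending bits ... */
	return IRQ_HANDLED;
}

int main(void)
{
	printf("pending=0 -> %d, pending=1 -> %d\n",
	       irq_handler(0), irq_handler(1));
	return 0;
}
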
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index d458a140c024..83a423293afd 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -31,7 +31,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
@@ -67,7 +67,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index dbcbfe80aac0..0013ad0db9ef 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
- ci_dpm.o dce6_afmt.o radeon_vm.o
+ ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o
# add async DMA block
radeon-y += \
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a03c73411a56..30d242b25078 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1414,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+ /* set pageflip to happen only at start of vblank interval (front porch) */
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
@@ -1614,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+ /* set pageflip to happen only at start of vblank interval (front porch) */
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 35f4182c63b6..b1e11f8434e2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -127,7 +127,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
/* flags not zero */
if (args.v1.ucReplyStatus == 2) {
DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
- r = -EBUSY;
+ r = -EIO;
goto done;
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2b2908440644..a7f2ddf09a9d 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
struct backlight_properties props;
struct radeon_backlight_privdata *pdata;
struct radeon_encoder_atom_dig *dig;
- u8 backlight_level;
char bl_name[16];
/* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
pdata->encoder = radeon_encoder;
- backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
-
dig = radeon_encoder->enc_priv;
dig->bl_dev = bd;
bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+ /* Set a reasonable default here if the level is 0; otherwise
+ * fbdev will attempt to turn the backlight on after console
+ * unblanking, then try to restore 0, which turns the backlight
+ * off again.
+ */
+ if (bd->props.brightness == 0)
+ bd->props.brightness = RADEON_MAX_BL_LEVEL;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
@@ -327,12 +331,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
- /* get the native mode for LVDS */
- if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ /* get the native mode for scaling */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_panel_mode_fixup(encoder, adjusted_mode);
-
- /* get the native mode for TV */
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+ } else if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
if (tv_dac) {
if (tv_dac->tv_std == TV_STD_NTSC ||
@@ -342,6 +344,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
else
radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
}
+ } else if (radeon_encoder->rmx_type != RMX_OFF) {
+ radeon_panel_mode_fixup(encoder, adjusted_mode);
}
if (ASIC_IS_DCE3(rdev) &&
@@ -712,7 +716,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if (radeon_connector->use_digital &&
(radeon_connector->audio == RADEON_AUDIO_ENABLE))
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
@@ -731,7 +735,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if (radeon_audio != 0) {
if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
@@ -751,7 +755,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
} else if (radeon_audio != 0) {
if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 10dae4106c08..022561e28707 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -940,7 +940,18 @@ static void ci_get_leakage_voltages(struct radeon_device *rdev)
pi->vddc_leakage.count = 0;
pi->vddci_leakage.count = 0;
- if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+ virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+ if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
+ continue;
+ if (vddc != 0 && vddc != virtual_voltage_id) {
+ pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+ pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+ pi->vddc_leakage.count++;
+ }
+ }
+ } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
@@ -1179,7 +1190,7 @@ static int ci_stop_dpm(struct radeon_device *rdev)
tmp &= ~GLOBAL_PWRMGT_EN;
WREG32_SMC(GENERAL_PWRMGT, tmp);
- tmp = RREG32(SCLK_PWRMGT_CNTL);
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
tmp &= ~DYNAMIC_PM_EN;
WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 8debc9d47362..b630edc2fd0c 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -213,24 +213,37 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
if (!rdev->smc_fw)
return -EINVAL;
- switch (rdev->family) {
- case CHIP_BONAIRE:
- ucode_start_address = BONAIRE_SMC_UCODE_START;
- ucode_size = BONAIRE_SMC_UCODE_SIZE;
- break;
- case CHIP_HAWAII:
- ucode_start_address = HAWAII_SMC_UCODE_START;
- ucode_size = HAWAII_SMC_UCODE_SIZE;
- break;
- default:
- DRM_ERROR("unknown asic in smc ucode loader\n");
- BUG();
+ if (rdev->new_fw) {
+ const struct smc_firmware_header_v1_0 *hdr =
+ (const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
+
+ radeon_ucode_print_smc_hdr(&hdr->header);
+
+ ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ src = (const u8 *)
+ (rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ ucode_start_address = BONAIRE_SMC_UCODE_START;
+ ucode_size = BONAIRE_SMC_UCODE_SIZE;
+ break;
+ case CHIP_HAWAII:
+ ucode_start_address = HAWAII_SMC_UCODE_START;
+ ucode_size = HAWAII_SMC_UCODE_SIZE;
+ break;
+ default:
+ DRM_ERROR("unknown asic in smc ucode loader\n");
+ BUG();
+ }
+
+ src = (const u8 *)rdev->smc_fw->data;
}
if (ucode_size & 3)
return -EINVAL;
- src = (const u8 *)rdev->smc_fw->data;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
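
New-style SMC images carry a little-endian header that locates the ucode payload (start address, size, array offset), while legacy blobs are bare big-endian word streams with per-ASIC hard-coded sizes, hence the two branches above. A user-space stand-in parse, with an assumed two-field header rather than the kernel's exact layout:

#include <stdint.h>
#include <stdio.h>

static uint32_t le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	uint8_t blob[] = { 8, 0, 0, 0, 8, 0, 0, 0,		/* header */
			   0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4 };	/* ucode */
	uint32_t size = le32(blob);		/* ucode_size_bytes */
	uint32_t off  = le32(blob + 4);		/* ucode_array_offset_bytes */

	printf("ucode: %u bytes at +%u, first word %#x\n",
	       size, off, le32(blob + off));
	return 0;
}
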
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index dcd4518a9b08..b625646bf3e2 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -42,6 +42,16 @@ MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
+
+MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
+MODULE_FIRMWARE("radeon/bonaire_me.bin");
+MODULE_FIRMWARE("radeon/bonaire_ce.bin");
+MODULE_FIRMWARE("radeon/bonaire_mec.bin");
+MODULE_FIRMWARE("radeon/bonaire_mc.bin");
+MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
+MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
+MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+
MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
MODULE_FIRMWARE("radeon/HAWAII_me.bin");
MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
@@ -51,18 +61,45 @@ MODULE_FIRMWARE("radeon/HAWAII_mc2.bin");
MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
+
+MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
+MODULE_FIRMWARE("radeon/hawaii_me.bin");
+MODULE_FIRMWARE("radeon/hawaii_ce.bin");
+MODULE_FIRMWARE("radeon/hawaii_mec.bin");
+MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
+MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
+MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
+
+MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
+MODULE_FIRMWARE("radeon/kaveri_me.bin");
+MODULE_FIRMWARE("radeon/kaveri_ce.bin");
+MODULE_FIRMWARE("radeon/kaveri_mec.bin");
+MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
+MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
+MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
+
MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
MODULE_FIRMWARE("radeon/KABINI_me.bin");
MODULE_FIRMWARE("radeon/KABINI_ce.bin");
MODULE_FIRMWARE("radeon/KABINI_mec.bin");
MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
+
+MODULE_FIRMWARE("radeon/kabini_pfp.bin");
+MODULE_FIRMWARE("radeon/kabini_me.bin");
+MODULE_FIRMWARE("radeon/kabini_ce.bin");
+MODULE_FIRMWARE("radeon/kabini_mec.bin");
+MODULE_FIRMWARE("radeon/kabini_rlc.bin");
+MODULE_FIRMWARE("radeon/kabini_sdma.bin");
+
MODULE_FIRMWARE("radeon/MULLINS_pfp.bin");
MODULE_FIRMWARE("radeon/MULLINS_me.bin");
MODULE_FIRMWARE("radeon/MULLINS_ce.bin");
@@ -70,6 +107,13 @@ MODULE_FIRMWARE("radeon/MULLINS_mec.bin");
MODULE_FIRMWARE("radeon/MULLINS_rlc.bin");
MODULE_FIRMWARE("radeon/MULLINS_sdma.bin");
+MODULE_FIRMWARE("radeon/mullins_pfp.bin");
+MODULE_FIRMWARE("radeon/mullins_me.bin");
+MODULE_FIRMWARE("radeon/mullins_ce.bin");
+MODULE_FIRMWARE("radeon/mullins_mec.bin");
+MODULE_FIRMWARE("radeon/mullins_rlc.bin");
+MODULE_FIRMWARE("radeon/mullins_sdma.bin");
+
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
@@ -1760,27 +1804,44 @@ static void cik_srbm_select(struct radeon_device *rdev,
*/
int ci_mc_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
+ const __be32 *fw_data = NULL;
+ const __le32 *new_fw_data = NULL;
u32 running, blackout = 0;
- u32 *io_mc_regs;
+ u32 *io_mc_regs = NULL;
+ const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
if (!rdev->mc_fw)
return -EINVAL;
- ucode_size = rdev->mc_fw->size / 4;
+ if (rdev->new_fw) {
+ const struct mc_firmware_header_v1_0 *hdr =
+ (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
- switch (rdev->family) {
- case CHIP_BONAIRE:
- io_mc_regs = (u32 *)&bonaire_io_mc_regs;
- regs_size = BONAIRE_IO_MC_REGS_SIZE;
- break;
- case CHIP_HAWAII:
- io_mc_regs = (u32 *)&hawaii_io_mc_regs;
- regs_size = HAWAII_IO_MC_REGS_SIZE;
- break;
- default:
- return -EINVAL;
+ radeon_ucode_print_mc_hdr(&hdr->header);
+
+ regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+ new_io_mc_regs = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ new_fw_data = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ ucode_size = rdev->mc_fw->size / 4;
+
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ io_mc_regs = (u32 *)&bonaire_io_mc_regs;
+ regs_size = BONAIRE_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_HAWAII:
+ io_mc_regs = (u32 *)&hawaii_io_mc_regs;
+ regs_size = HAWAII_IO_MC_REGS_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ fw_data = (const __be32 *)rdev->mc_fw->data;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1797,13 +1858,21 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
- WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
- WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ if (rdev->new_fw) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+ WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+ } else {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ }
}
/* load the MC ucode */
- fw_data = (const __be32 *)rdev->mc_fw->data;
- for (i = 0; i < ucode_size; i++)
- WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ for (i = 0; i < ucode_size; i++) {
+ if (rdev->new_fw)
+ WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+ else
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ }
/* put the engine back into the active state */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
@@ -1841,17 +1910,21 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
static int cik_init_microcode(struct radeon_device *rdev)
{
const char *chip_name;
+ const char *new_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size,
mec_req_size, rlc_req_size, mc_req_size = 0,
sdma_req_size, smc_req_size = 0, mc2_req_size = 0;
char fw_name[30];
+ int new_fw = 0;
int err;
+ int num_fw;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_BONAIRE:
chip_name = "BONAIRE";
+ new_chip_name = "bonaire";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
@@ -1861,9 +1934,11 @@ static int cik_init_microcode(struct radeon_device *rdev)
mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
+ num_fw = 8;
break;
case CHIP_HAWAII:
chip_name = "HAWAII";
+ new_chip_name = "hawaii";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
@@ -1873,142 +1948,285 @@ static int cik_init_microcode(struct radeon_device *rdev)
mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
+ num_fw = 8;
break;
case CHIP_KAVERI:
chip_name = "KAVERI";
+ new_chip_name = "kaveri";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mec_req_size = CIK_MEC_UCODE_SIZE * 4;
rlc_req_size = KV_RLC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ num_fw = 7;
break;
case CHIP_KABINI:
chip_name = "KABINI";
+ new_chip_name = "kabini";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mec_req_size = CIK_MEC_UCODE_SIZE * 4;
rlc_req_size = KB_RLC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ num_fw = 6;
break;
case CHIP_MULLINS:
chip_name = "MULLINS";
+ new_chip_name = "mullins";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mec_req_size = CIK_MEC_UCODE_SIZE * 4;
rlc_req_size = ML_RLC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ num_fw = 6;
break;
default: BUG();
}
- DRM_INFO("Loading %s Microcode\n", chip_name);
+ DRM_INFO("Loading %s Microcode\n", new_chip_name);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->pfp_fw->size != pfp_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->pfp_fw->size, fw_name);
- err = -EINVAL;
- goto out;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->pfp_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->me_fw->size != me_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->me_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->me_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->ce_fw->size != ce_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->ce_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->ce_fw->size != ce_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->ce_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->ce_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", new_chip_name);
err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->mec_fw->size != mec_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->mec_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+ err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->mec_fw->size != mec_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mec_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->mec_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ if (rdev->family == CHIP_KAVERI) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", new_chip_name);
+ err = request_firmware(&rdev->mec2_fw, fw_name, rdev->dev);
+ if (err) {
+ goto out;
+ } else {
+ err = radeon_ucode_validate(rdev->mec2_fw);
+ if (err) {
+ goto out;
+ } else {
+ new_fw++;
+ }
+ }
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->rlc_fw->size != rlc_req_size) {
- printk(KERN_ERR
- "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
- rdev->rlc_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->rlc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", new_chip_name);
err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->sdma_fw->size != sdma_req_size) {
- printk(KERN_ERR
- "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
- rdev->sdma_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->sdma_fw->size != sdma_req_size) {
+ printk(KERN_ERR
+ "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
+ rdev->sdma_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->sdma_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
/* No SMC, MC ucode on APUs */
if (!(rdev->flags & RADEON_IS_IGP)) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
- if (err)
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ }
+ if ((rdev->mc_fw->size != mc_req_size) &&
+ (rdev->mc_fw->size != mc2_req_size)){
+ printk(KERN_ERR
+ "cik_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
+ } else {
+ err = radeon_ucode_validate(rdev->mc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
goto out;
+ } else {
+ new_fw++;
+ }
}
- if ((rdev->mc_fw->size != mc_req_size) &&
- (rdev->mc_fw->size != mc2_req_size)){
- printk(KERN_ERR
- "cik_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
- err = -EINVAL;
- }
- DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
- printk(KERN_ERR
- "smc: error loading firmware \"%s\"\n",
- fw_name);
- release_firmware(rdev->smc_fw);
- rdev->smc_fw = NULL;
- err = 0;
- } else if (rdev->smc_fw->size != smc_req_size) {
- printk(KERN_ERR
- "cik_smc: Bogus length %zu in firmware \"%s\"\n",
- rdev->smc_fw->size, fw_name);
- err = -EINVAL;
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ err = 0;
+ } else if (rdev->smc_fw->size != smc_req_size) {
+ printk(KERN_ERR
+ "cik_smc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->smc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->smc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
}
+ if (new_fw == 0) {
+ rdev->new_fw = false;
+ } else if (new_fw < num_fw) {
+ printk(KERN_ERR "ci_fw: mixing new and old firmware!\n");
+ err = -EINVAL;
+ } else {
+ rdev->new_fw = true;
+ }
+
out:
if (err) {
if (err != -EINVAL)
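
The loading policy in cik_init_microcode(): try the new lowercase firmware name first, fall back to the legacy uppercase blob, count how many images validated as new-format, and refuse a mixed set, since new and legacy images take different parse paths at upload time. A user-space model of the all-or-nothing check:

#include <stdio.h>

int main(void)
{
	int num_fw = 8;
	int new_fw = 5;		/* images that validated as new-format */

	if (new_fw == 0)
		puts("all legacy: use old parse paths");
	else if (new_fw < num_fw)
		puts("mixing new and old firmware: -EINVAL");
	else
		puts("all new-format: use header-driven parse paths");
	return 0;
}
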
@@ -2021,8 +2239,14 @@ out:
rdev->me_fw = NULL;
release_firmware(rdev->ce_fw);
rdev->ce_fw = NULL;
+ release_firmware(rdev->mec_fw);
+ rdev->mec_fw = NULL;
+ release_firmware(rdev->mec2_fw);
+ rdev->mec2_fw = NULL;
release_firmware(rdev->rlc_fw);
rdev->rlc_fw = NULL;
+ release_firmware(rdev->sdma_fw);
+ rdev->sdma_fw = NULL;
release_firmware(rdev->mc_fw);
rdev->mc_fw = NULL;
release_firmware(rdev->smc_fw);
@@ -2291,6 +2515,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 8) {
@@ -3665,8 +3890,6 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
- /* HDP flush */
- cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
}
/**
@@ -3695,8 +3918,6 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, upper_32_bits(addr));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
- /* HDP flush */
- cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
}
bool cik_semaphore_ring_emit(struct radeon_device *rdev,
@@ -3968,7 +4189,6 @@ static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
*/
static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
@@ -3976,26 +4196,70 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
cik_cp_gfx_enable(rdev, false);
- /* PFP */
- fw_data = (const __be32 *)rdev->pfp_fw->data;
- WREG32(CP_PFP_UCODE_ADDR, 0);
- for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
- WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_PFP_UCODE_ADDR, 0);
-
- /* CE */
- fw_data = (const __be32 *)rdev->ce_fw->data;
- WREG32(CP_CE_UCODE_ADDR, 0);
- for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
- WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_CE_UCODE_ADDR, 0);
-
- /* ME */
- fw_data = (const __be32 *)rdev->me_fw->data;
- WREG32(CP_ME_RAM_WADDR, 0);
- for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
- WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_ME_RAM_WADDR, 0);
+ if (rdev->new_fw) {
+ const struct gfx_firmware_header_v1_0 *pfp_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+ const struct gfx_firmware_header_v1_0 *ce_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+ const struct gfx_firmware_header_v1_0 *me_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
+ radeon_ucode_print_gfx_hdr(&ce_hdr->header);
+ radeon_ucode_print_gfx_hdr(&me_hdr->header);
+
+ /* PFP */
+ fw_data = (const __le32 *)
+ (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __le32 *)
+ (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __le32 *)
+ (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ } else {
+ const __be32 *fw_data;
+
+ /* PFP */
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
+ WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ }
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_CE_UCODE_ADDR, 0);
@@ -4260,7 +4524,6 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
*/
static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
if (!rdev->mec_fw)
@@ -4268,20 +4531,55 @@ static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
cik_cp_compute_enable(rdev, false);
- /* MEC1 */
- fw_data = (const __be32 *)rdev->mec_fw->data;
- WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
- for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
- WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+ if (rdev->new_fw) {
+ const struct gfx_firmware_header_v1_0 *mec_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_gfx_hdr(&mec_hdr->header);
+
+ /* MEC1 */
+ fw_data = (const __le32 *)
+ (rdev->mec_fw->data + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
- if (rdev->family == CHIP_KAVERI) {
/* MEC2 */
+ if (rdev->family == CHIP_KAVERI) {
+ const struct gfx_firmware_header_v1_0 *mec2_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
+
+ fw_data = (const __le32 *)
+ (rdev->mec2_fw->data +
+ le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ }
+ } else {
+ const __be32 *fw_data;
+
+ /* MEC1 */
fw_data = (const __be32 *)rdev->mec_fw->data;
- WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
- WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+
+ if (rdev->family == CHIP_KAVERI) {
+ /* MEC2 */
+ fw_data = (const __be32 *)rdev->mec_fw->data;
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
+ WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ }
}
return 0;
@@ -4374,7 +4672,7 @@ static int cik_mec_init(struct radeon_device *rdev)
r = radeon_bo_create(rdev,
rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL,
+ RADEON_GEM_DOMAIN_GTT, 0, NULL,
&rdev->mec.hpd_eop_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -4544,7 +4842,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
r = radeon_bo_create(rdev,
sizeof(struct bonaire_mqd),
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL,
+ RADEON_GEM_DOMAIN_GTT, 0, NULL,
&rdev->ring[idx].mqd_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
@@ -5401,7 +5699,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
@@ -5641,12 +5938,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
+ int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
if (vm == NULL)
return;
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
if (vm->id < 8) {
radeon_ring_write(ring,
@@ -5696,7 +5994,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 1 << vm->id);
/* compute doesn't have PFP */
- if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
+ if (usepfp) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
@@ -5864,28 +6162,10 @@ static void cik_rlc_start(struct radeon_device *rdev)
static int cik_rlc_resume(struct radeon_device *rdev)
{
u32 i, size, tmp;
- const __be32 *fw_data;
if (!rdev->rlc_fw)
return -EINVAL;
- switch (rdev->family) {
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- default:
- size = BONAIRE_RLC_UCODE_SIZE;
- break;
- case CHIP_KAVERI:
- size = KV_RLC_UCODE_SIZE;
- break;
- case CHIP_KABINI:
- size = KB_RLC_UCODE_SIZE;
- break;
- case CHIP_MULLINS:
- size = ML_RLC_UCODE_SIZE;
- break;
- }
-
cik_rlc_stop(rdev);
/* disable CG */
@@ -5909,11 +6189,45 @@ static int cik_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
- fw_data = (const __be32 *)rdev->rlc_fw->data;
+ if (rdev->new_fw) {
+ const struct rlc_firmware_header_v1_0 *hdr =
+ (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
+ const __le32 *fw_data = (const __le32 *)
+ (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ radeon_ucode_print_rlc_hdr(&hdr->header);
+
+ size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ WREG32(RLC_GPM_UCODE_ADDR, 0);
+ for (i = 0; i < size; i++)
+ WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
WREG32(RLC_GPM_UCODE_ADDR, 0);
- for (i = 0; i < size; i++)
- WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(RLC_GPM_UCODE_ADDR, 0);
+ } else {
+ const __be32 *fw_data;
+
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ default:
+ size = BONAIRE_RLC_UCODE_SIZE;
+ break;
+ case CHIP_KAVERI:
+ size = KV_RLC_UCODE_SIZE;
+ break;
+ case CHIP_KABINI:
+ size = KB_RLC_UCODE_SIZE;
+ break;
+ case CHIP_MULLINS:
+ size = ML_RLC_UCODE_SIZE;
+ break;
+ }
+
+ fw_data = (const __be32 *)rdev->rlc_fw->data;
+ WREG32(RLC_GPM_UCODE_ADDR, 0);
+ for (i = 0; i < size; i++)
+ WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(RLC_GPM_UCODE_ADDR, 0);
+ }
/* XXX - find out what chips support lbpw */
cik_enable_lbpw(rdev, false);
@@ -6347,11 +6661,10 @@ static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
void cik_init_cp_pg_table(struct radeon_device *rdev)
{
- const __be32 *fw_data;
volatile u32 *dst_ptr;
int me, i, max_me = 4;
u32 bo_offset = 0;
- u32 table_offset;
+ u32 table_offset, table_size;
if (rdev->family == CHIP_KAVERI)
max_me = 5;
@@ -6362,24 +6675,71 @@ void cik_init_cp_pg_table(struct radeon_device *rdev)
/* write the cp table buffer */
dst_ptr = rdev->rlc.cp_table_ptr;
for (me = 0; me < max_me; me++) {
- if (me == 0) {
- fw_data = (const __be32 *)rdev->ce_fw->data;
- table_offset = CP_ME_TABLE_OFFSET;
- } else if (me == 1) {
- fw_data = (const __be32 *)rdev->pfp_fw->data;
- table_offset = CP_ME_TABLE_OFFSET;
- } else if (me == 2) {
- fw_data = (const __be32 *)rdev->me_fw->data;
- table_offset = CP_ME_TABLE_OFFSET;
+ if (rdev->new_fw) {
+ const __le32 *fw_data;
+ const struct gfx_firmware_header_v1_0 *hdr;
+
+ if (me == 0) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->ce_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->pfp_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->me_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->mec_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->mec2_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+ bo_offset += table_size;
} else {
- fw_data = (const __be32 *)rdev->mec_fw->data;
- table_offset = CP_MEC_TABLE_OFFSET;
- }
+ const __be32 *fw_data;
+ table_size = CP_ME_TABLE_SIZE;
+
+ if (me == 0) {
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 1) {
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 2) {
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else {
+ fw_data = (const __be32 *)rdev->mec_fw->data;
+ table_offset = CP_MEC_TABLE_OFFSET;
+ }
- for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
- dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
+ for (i = 0; i < table_size; i++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
+ }
+ bo_offset += table_size;
}
- bo_offset += CP_ME_TABLE_SIZE;
}
}
@@ -7376,6 +7736,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
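The wptr fix above matters because RB_OVERFLOW appears to be a status
bit carried in the fetched wptr value rather than part of the ring
offset; once the overflow is acknowledged in IH_RB_CNTL, the bit must
also be stripped from the local wptr so that later comparisons against
rptr see a clean ring offset. Condensed from the function above (the
warning and rptr adjustment are elided):

    u32 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);

    if (wptr & RB_OVERFLOW) {
            /* ... warn and bump rptr past the overwritten entries ... */
            WREG32(IH_RB_CNTL, RREG32(IH_RB_CNTL) | IH_WPTR_OVERFLOW_CLEAR);
            wptr &= ~RB_OVERFLOW;   /* drop the status bit before masking */
    }
    return wptr & rdev->ih.ptr_mask;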
@@ -7616,7 +7977,8 @@ restart_ih:
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
@@ -7676,14 +8038,16 @@ restart_ih:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ if (addr == 0x0 && status == 0x0)
+ break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
cik_vm_decode_fault(rdev, status, addr, mc_client);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 167: /* VCE */
DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
@@ -7896,6 +8260,7 @@ restart_ih:
static int cik_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
+ u32 nop;
int r;
/* enable pcie gen2/3 link */
@@ -8029,9 +8394,18 @@ static int cik_startup(struct radeon_device *rdev)
}
cik_irq_set(rdev);
+ if (rdev->family == CHIP_HAWAII) {
+ if (rdev->new_fw)
+ nop = PACKET3(PACKET3_NOP, 0x3FFF);
+ else
+ nop = RADEON_CP_PACKET2;
+ } else {
+ nop = PACKET3(PACKET3_NOP, 0x3FFF);
+ }
+
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- PACKET3(PACKET3_NOP, 0x3FFF));
+ nop);
if (r)
return r;
@@ -8039,7 +8413,7 @@ static int cik_startup(struct radeon_device *rdev)
/* type-2 packets are deprecated on MEC, use type-3 instead */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
- PACKET3(PACKET3_NOP, 0x3FFF));
+ nop);
if (r)
return r;
ring->me = 1; /* first MEC */
@@ -8050,7 +8424,7 @@ static int cik_startup(struct radeon_device *rdev)
/* type-2 packets are deprecated on MEC, use type-3 instead */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
- PACKET3(PACKET3_NOP, 0x3FFF));
+ nop);
if (r)
return r;
/* dGPU only have 1 MEC */
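On the NOP selection above: Hawaii's original CP microcode apparently
only copes with type-2 (PACKET2) NOPs on these rings, while the new
firmware and every other CIK part expect type-3 NOPs, so the value is
computed once and reused for all three ring initializations. Condensed,
the choice is equivalent to:

    u32 nop = (rdev->family == CHIP_HAWAII && !rdev->new_fw) ?
              RADEON_CP_PACKET2 : PACKET3(PACKET3_NOP, 0x3FFF);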
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 8e9d0f1d858e..bcf480510ac2 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_ucode.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "cikd.h"
@@ -118,6 +119,7 @@ void cik_sdma_set_wptr(struct radeon_device *rdev,
reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+ (void)RREG32(reg);
}
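The read-back added above is the usual posted-write flush: MMIO writes
can sit in a bus write buffer, and reading the same register back
forces the wptr update to reach the SDMA engine before the caller
proceeds. The pattern in general form:

    WREG32(reg, val);
    (void)RREG32(reg);      /* flush the posted write */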
/**
@@ -419,7 +421,6 @@ static int cik_sdma_rlc_resume(struct radeon_device *rdev)
*/
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
if (!rdev->sdma_fw)
@@ -428,19 +429,48 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
/* halt the MEs */
cik_sdma_enable(rdev, false);
- /* sdma0 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
-
- /* sdma1 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+ if (rdev->new_fw) {
+ const struct sdma_firmware_header_v1_0 *hdr =
+ (const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_sdma_hdr(&hdr->header);
+
+ /* sdma0 */
+ fw_data = (const __le32 *)
+ (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ /* sdma1 */
+ fw_data = (const __le32 *)
+ (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+ } else {
+ const __be32 *fw_data;
+
+ /* sdma0 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ /* sdma1 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+ }
WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
@@ -719,7 +749,43 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}
/**
- * cik_sdma_vm_set_page - update the page tables using sDMA
+ * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA (CIK).
+ */
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ while (count) {
+ unsigned bytes = count * 8;
+ if (bytes > 0x1FFFF8)
+ bytes = 0x1FFFF8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+ pe += bytes;
+ src += bytes;
+ count -= bytes / 8;
+ }
+}
+
+/**
+ * cik_sdma_vm_write_pages - update PTEs by writing them manually
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
@@ -729,84 +795,103 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using sDMA (CIK).
+ * Update PTEs by writing them manually using sDMA (CIK).
*/
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;
- trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
- if (flags == R600_PTE_GART) {
- uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0x1FFFF8)
- bytes = 0x1FFFF8;
-
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = bytes;
- ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(src);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
- } else if (flags & R600_PTE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & R600_PTE_VALID)
+ } else if (flags & R600_PTE_VALID) {
value = addr;
- else
+ } else {
value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
+ }
+ addr += incr;
+ value |= flags;
+ ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
}
}
+}
+
+/**
+ * cik_sdma_vm_set_pages - update the page tables using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count;
+ if (ndw > 0x7FFFF)
+ ndw = 0x7FFFF;
+
+ if (flags & R600_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
+ pe += ndw * 8;
+ addr += ndw * incr;
+ count -= ndw;
+ }
+}
+
+/**
+ * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
+{
while (ib->length_dw & 0x7)
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
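The split above replaces the single cik_sdma_vm_set_page entry point
with three specialized helpers plus an explicit pad: copy_pages copies
PTEs straight out of the GART table (capped at 0x1FFFF8 bytes per COPY
packet), write_pages emits them inline (up to 0xFFFFE dwords per WRITE
packet), and set_pages uses GENERATE_PTE_PDE for contiguous VRAM (up to
0x7FFFF entries). A hypothetical dispatch sketch mirroring the old
branches; the real selection logic lives in radeon_vm.c, which is not
part of this patch excerpt, and the macros are the ones added to
radeon.h below:

    static void radeon_vm_update_ptes_sketch(struct radeon_device *rdev,
                                             struct radeon_ib *ib,
                                             uint64_t pe, uint64_t addr,
                                             unsigned count, uint32_t incr,
                                             uint32_t flags)
    {
            if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
                    /* PTEs already live in the GART table: copy them */
                    uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;

                    radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
            } else if (flags & R600_PTE_SYSTEM) {
                    radeon_asic_vm_write_pages(rdev, ib, pe, addr, count,
                                               incr, flags);
            } else {
                    radeon_asic_vm_set_pages(rdev, ib, pe, addr, count,
                                             incr, flags);
            }
            radeon_asic_vm_pad_ib(rdev, ib);    /* align the IB to 8 dwords */
    }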
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 0a65dc7e93e7..ab29f953a767 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -136,13 +136,13 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
AUDIO_LIPSYNC(connector->audio_latency[1]);
else
- tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
} else {
if (connector->latency_present[0])
tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
AUDIO_LIPSYNC(connector->audio_latency[0]);
else
- tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
}
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
@@ -164,8 +164,10 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -173,7 +175,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
@@ -225,8 +227,10 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -234,7 +238,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
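The added break is not just an optimization: 'connector' is used after
the loop (via radeon_connector_edid(connector)), and a
list_for_each_entry cursor that runs to completion ends up pointing at
the list head rather than a real entry. Breaking on the match keeps the
cursor valid:

    list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
            if (connector->encoder == encoder) {
                    radeon_connector = to_radeon_connector(connector);
                    break;  /* keep 'connector' pointing at the match */
            }
    }
    if (!radeon_connector)
            return;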
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e2f605224e8c..4fedd14e670a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -189,7 +189,7 @@ static const u32 evergreen_golden_registers[] =
0x8c1c, 0xffffffff, 0x00001010,
0x28350, 0xffffffff, 0x00000000,
0xa008, 0xffffffff, 0x00010000,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x9508, 0xffffffff, 0x00000002,
0x913c, 0x0000000f, 0x0000000a
};
@@ -476,7 +476,7 @@ static const u32 cedar_golden_registers[] =
0x8c1c, 0xffffffff, 0x00001010,
0x28350, 0xffffffff, 0x00000000,
0xa008, 0xffffffff, 0x00010000,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x9508, 0xffffffff, 0x00000002
};
@@ -635,7 +635,7 @@ static const u32 juniper_mgcg_init[] =
static const u32 supersumo_golden_registers[] =
{
0x5eb4, 0xffffffff, 0x00000002,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x7030, 0xffffffff, 0x00000011,
0x7c30, 0xffffffff, 0x00000011,
0x6104, 0x01000300, 0x00000000,
@@ -719,7 +719,7 @@ static const u32 sumo_golden_registers[] =
static const u32 wrestler_golden_registers[] =
{
0x5eb4, 0xffffffff, 0x00000002,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x7030, 0xffffffff, 0x00000011,
0x7c30, 0xffffffff, 0x00000011,
0x6104, 0x01000300, 0x00000000,
@@ -2424,7 +2424,6 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@@ -2642,8 +2641,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x3) != 0) {
- tmp &= ~0x3;
+ if ((tmp & 0x7) != 3) {
+ tmp &= ~0x7;
+ tmp |= 0x3;
WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -2676,7 +2676,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
if (save->crtc_enabled[i]) {
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
@@ -4022,7 +4022,8 @@ int sumo_rlc_init(struct radeon_device *rdev)
/* save restore block */
if (rdev->rlc.save_restore_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ &rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
@@ -4100,7 +4101,8 @@ int sumo_rlc_init(struct radeon_device *rdev)
if (rdev->rlc.clear_state_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ &rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
sumo_rlc_fini(rdev);
@@ -4174,8 +4176,10 @@ int sumo_rlc_init(struct radeon_device *rdev)
if (rdev->rlc.cp_table_size) {
if (rdev->rlc.cp_table_obj == NULL) {
- r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj);
+ r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
+ PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ &rdev->rlc.cp_table_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
sumo_rlc_fini(rdev);
@@ -4755,6 +4759,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -4959,7 +4964,8 @@ restart_ih:
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
@@ -5066,14 +5072,16 @@ restart_ih:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ if (addr == 0x0 && status == 0x0)
+ break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
cayman_vm_decode_fault(rdev, status, addr);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 1ec0e6e83f9f..278c7a139d74 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -117,7 +117,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
@@ -172,7 +172,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 333d143fca2c..23bff590fb6e 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -239,7 +239,6 @@
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
-#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 5a33ca681867..327b85f7fd0d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1229,7 +1229,6 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 6378e0276691..8a3e6221cece 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -307,7 +307,43 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}
/**
- * cayman_dma_vm_set_page - update the page tables using the DMA
+ * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ unsigned ndw;
+
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+ pe += ndw * 4;
+ src += ndw * 4;
+ count -= ndw / 2;
+ }
+}
+
+/**
+ * cayman_dma_vm_write_pages - update PTEs by writing them manually
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
@@ -315,71 +351,103 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: hw access flags
+ * @flags: hw access flags
*
- * Update the page tables using the DMA (cayman/TN).
+ * Update PTEs by writing them manually using the DMA (cayman/TN).
*/
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;
- trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
- if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & R600_PTE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & R600_PTE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & R600_PTE_VALID)
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
+ 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & R600_PTE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & R600_PTE_VALID) {
value = addr;
- else
+ } else {
value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
+ }
+ addr += incr;
+ value |= flags;
+ ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
}
}
+}
+
+/**
+ * cayman_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & R600_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+}
+
+/**
+ * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
+{
while (ib->length_dw & 0x7)
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 1544efcf1c3a..04b5940b8923 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -652,7 +652,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
- radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32(RADEON_AIC_CNTL, tmp);
@@ -683,7 +682,7 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
}
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr)
+ uint64_t addr, uint32_t flags)
{
u32 *gtt = rdev->gart.ptr;
gtt[i] = cpu_to_le32(lower_32_bits(addr));
@@ -838,11 +837,7 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
/* Wait until IDLE & CLEAN */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
- RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+ r100_ring_hdp_flush(rdev, ring);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(ring, fence->seq);
@@ -1061,6 +1056,20 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
(void)RREG32(RADEON_CP_RB_WPTR);
}
+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * @rdev: radeon device structure
+ * @ring: ring buffer struct for emitting packets
+ */
+void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
+
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -1401,7 +1410,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
*/
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_cs_packet p3reloc, waitreloc;
@@ -1441,12 +1449,11 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
reg = R100_CP_PACKET0_GET_REG(header);
- obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
+ if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
}
- crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
crtc_id = radeon_crtc->crtc_id;
@@ -4067,39 +4074,6 @@ int r100_init(struct radeon_device *rdev)
return 0;
}
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
- bool always_indirect)
-{
- if (reg < rdev->rmmio_size && !always_indirect)
- return readl(((void __iomem *)rdev->rmmio) + reg);
- else {
- unsigned long flags;
- uint32_t ret;
-
- spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
- spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
-
- return ret;
- }
-}
-
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
- bool always_indirect)
-{
- if (reg < rdev->rmmio_size && !always_indirect)
- writel(v, ((void __iomem *)rdev->rmmio) + reg);
- else {
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
- spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
- }
-}
-
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
if (reg < rdev->rio_mem_size)
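The r100.c changes above factor the fence-time HOST_PATH_CNTL
invalidate into r100_ring_hdp_flush and expose it as the per-ring
hdp_flush callback (wired up in radeon_asic.c below), alongside the
renamed MMIO-side mmio_hdp_flush hook. A hypothetical caller sketch of
flushing through whichever path an ASIC provides:

    struct radeon_asic_ring *r = rdev->asic->ring[ring->idx];

    if (r->hdp_flush)
            r->hdp_flush(rdev, ring);           /* via ring packets */
    else if (rdev->asic->mmio_hdp_flush)
            rdev->asic->mmio_hdp_flush(rdev);   /* via MMIO registers */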
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 3c21d77a483d..75b30338c226 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -69,17 +69,23 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
mb();
}
+#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr)
+ uint64_t addr, uint32_t flags)
{
void __iomem *ptr = rdev->gart.ptr;
addr = (lower_32_bits(addr) >> 8) |
- ((upper_32_bits(addr) & 0xff) << 24) |
- R300_PTE_WRITEABLE | R300_PTE_READABLE;
+ ((upper_32_bits(addr) & 0xff) << 24);
+ if (flags & RADEON_GART_PAGE_READ)
+ addr |= R300_PTE_READABLE;
+ if (flags & RADEON_GART_PAGE_WRITE)
+ addr |= R300_PTE_WRITEABLE;
+ if (!(flags & RADEON_GART_PAGE_SNOOP))
+ addr |= R300_PTE_UNSNOOPED;
/* on x86 we want this to be CPU endian, on powerpc
* on powerpc without HW swappers, it'll get swapped on way
* into VRAM - so no need for cpu_to_le32 on VRAM tables */
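With the flags argument above, rv370 now builds the R300 PTE bits from
the caller's intent instead of hardwiring READABLE|WRITEABLE, and marks
the page unsnooped unless RADEON_GART_PAGE_SNOOP is set. This is also
presumably why radeon_gart_restore can be dropped throughout this
patch: radeon_gart_bind now takes the flags and writes final entries at
bind time, so there is nothing left to replay. A sketch of a typical
fully-accessible, snooped system page, using the flag bits introduced
in radeon.h below:

    uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
                     RADEON_GART_PAGE_WRITE | RADEON_GART_PAGE_SNOOP;

    radeon_gart_set_page(rdev, i, dma_addr, flags);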
@@ -120,7 +126,6 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c66952d4b00c..c70a504d96af 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -968,7 +968,6 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -1339,7 +1338,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->vram_scratch.robj);
+ 0, NULL, &rdev->vram_scratch.robj);
if (r) {
return r;
}
@@ -3227,7 +3226,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
if (rdev->ih.ring_obj == NULL) {
r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_DOMAIN_GTT, 0,
NULL, &rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
@@ -3795,6 +3794,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -3923,11 +3923,13 @@ restart_ih:
break;
case 9: /* D1 pflip */
DRM_DEBUG("IH: D1 flip\n");
- radeon_crtc_handle_flip(rdev, 0);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, 0);
break;
case 11: /* D2 pflip */
DRM_DEBUG("IH: D2 flip\n");
- radeon_crtc_handle_flip(rdev, 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, 1);
break;
case 19: /* HPD/DAC hotplug */
switch (src_data) {
@@ -4088,16 +4090,15 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
}
/**
- * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
* rdev: radeon device structure
- * bo: buffer object struct which userspace is waiting for idle
*
- * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
- * through ring buffer, this leads to corruption in rendering, see
- * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
- * directly perform HDP flush by writing register through MMIO.
+ * Some R6XX/R7XX don't seem to take into account HDP flushes performed
+ * through the ring buffer. This leads to corruption in rendering, see
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
+ * directly perform the HDP flush by writing the register through MMIO.
*/
-void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+void r600_mmio_hdp_flush(struct radeon_device *rdev)
{
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 12511bb5fd6f..c47537a1ddba 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -825,7 +825,6 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
uint32_t *vline_start_end,
uint32_t *vline_status)
{
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_cs_packet p3reloc, wait_reg_mem;
@@ -887,12 +886,11 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
reg = R600_CP_PACKET0_GET_REG(header);
- obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
+ if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
}
- crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
crtc_id = radeon_crtc->crtc_id;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 29d9cc04c04e..9e1732eb402c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -64,6 +64,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
+#include <linux/interval_tree.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -103,6 +104,7 @@ extern int radeon_hard_reset;
extern int radeon_vm_size;
extern int radeon_vm_block_size;
extern int radeon_deep_color;
+extern int radeon_use_pflipirq;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -304,6 +306,9 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
u16 *vddc, u16 *vddci,
u16 virtual_voltage_id,
u16 vbios_voltage_id);
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+ u16 virtual_voltage_id,
+ u16 *voltage);
int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
u8 voltage_type,
u16 nominal_voltage,
@@ -317,6 +322,9 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
struct atom_voltage_table *voltage_table);
bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
u8 voltage_type, u8 voltage_mode);
+int radeon_atom_get_svi2_info(struct radeon_device *rdev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id);
void radeon_atom_update_memory_dll(struct radeon_device *rdev,
u32 mem_clock);
void radeon_atom_set_ac_timing(struct radeon_device *rdev,
@@ -441,14 +449,13 @@ struct radeon_mman {
struct radeon_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
- uint64_t soffset;
- uint64_t eoffset;
uint32_t flags;
- bool valid;
+ uint64_t addr;
unsigned ref_count;
/* protected by vm mutex */
- struct list_head vm_list;
+ struct interval_tree_node it;
+ struct list_head vm_status;
/* constant after initialization */
struct radeon_vm *vm;
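The interval_tree_node above replaces the per-VM linear VA list, so
overlap checks in radeon_vm_bo_set_addr become tree lookups instead of
a list walk. A minimal sketch of the kernel interval-tree API; treating
it.start/it.last as the first and last GPU page of the mapping is an
assumption about how this patch keys the tree:

    #include <linux/interval_tree.h>

    /* hypothetical mapping setup; soffset/eoffset are byte offsets */
    bo_va->it.start = soffset >> RADEON_GPU_PAGE_SHIFT;
    bo_va->it.last  = (eoffset >> RADEON_GPU_PAGE_SHIFT) - 1;
    if (interval_tree_iter_first(&vm->va, bo_va->it.start, bo_va->it.last))
            return -EINVAL;                 /* range already mapped */
    interval_tree_insert(&bo_va->it, &vm->va);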
@@ -464,6 +471,7 @@ struct radeon_bo {
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
+ u32 flags;
unsigned pin_count;
void *kptr;
u32 tiling_flags;
@@ -542,9 +550,9 @@ struct radeon_gem {
int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
-int radeon_gem_object_create(struct radeon_device *rdev, int size,
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
int alignment, int initial_domain,
- bool discardable, bool kernel,
+ u32 flags, bool kernel,
struct drm_gem_object **obj);
int radeon_mode_dumb_create(struct drm_file *file_priv,
@@ -589,6 +597,12 @@ struct radeon_mc;
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
+#define RADEON_GART_PAGE_DUMMY 0
+#define RADEON_GART_PAGE_VALID (1 << 0)
+#define RADEON_GART_PAGE_READ (1 << 1)
+#define RADEON_GART_PAGE_WRITE (1 << 2)
+#define RADEON_GART_PAGE_SNOOP (1 << 3)
+
struct radeon_gart {
dma_addr_t table_addr;
struct radeon_bo *robj;
@@ -613,8 +627,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct page **pagelist,
- dma_addr_t *dma_addr);
-void radeon_gart_restore(struct radeon_device *rdev);
+ dma_addr_t *dma_addr, uint32_t flags);
/*
@@ -684,10 +697,9 @@ struct radeon_flip_work {
struct work_struct unpin_work;
struct radeon_device *rdev;
int crtc_id;
- struct drm_framebuffer *fb;
+ uint64_t base;
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
- struct radeon_bo *new_rbo;
struct radeon_fence *fence;
};
@@ -855,9 +867,9 @@ struct radeon_mec {
#define R600_PTE_FRAG_64KB (4 << 7)
#define R600_PTE_FRAG_256KB (6 << 7)
-/* flags used for GART page table entries on R600+ */
-#define R600_PTE_GART ( R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED \
- | R600_PTE_READABLE | R600_PTE_WRITEABLE)
+/* flags that must be set so we can copy directly from the GART table */
+#define R600_PTE_GART_MASK ( R600_PTE_READABLE | R600_PTE_WRITEABLE | \
+ R600_PTE_SYSTEM | R600_PTE_VALID )
struct radeon_vm_pt {
struct radeon_bo *bo;
@@ -865,9 +877,15 @@ struct radeon_vm_pt {
};
struct radeon_vm {
- struct list_head va;
+ struct rb_root va;
unsigned id;
+ /* BOs moved, but not yet updated in the PT */
+ struct list_head invalidated;
+
+ /* BOs freed, but not yet updated in the PT */
+ struct list_head freed;
+
/* contains the page directory */
struct radeon_bo *page_directory;
uint64_t pd_gpu_addr;
@@ -876,6 +894,8 @@ struct radeon_vm {
/* array of page tables, one for each page directory entry */
struct radeon_vm_pt *page_tables;
+ struct radeon_bo_va *ib_bo_va;
+
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@@ -1735,6 +1755,7 @@ struct radeon_asic_ring {
/* command emmit functions */
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+ void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -1758,13 +1779,8 @@ struct radeon_asic {
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
int (*asic_reset)(struct radeon_device *rdev);
- /* ioctl hw specific callback. Some hw might want to perform special
- * operation on specific ioctl. For instance on wait idle some hw
- * might want to perform and HDP flush through MMIO as it seems that
- * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
- * through ring.
- */
- void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+ /* Flush the HDP cache via MMIO */
+ void (*mmio_hdp_flush)(struct radeon_device *rdev);
/* check if 3D engine is idle */
bool (*gui_idle)(struct radeon_device *rdev);
/* wait for mc_idle */
@@ -1777,16 +1793,26 @@ struct radeon_asic {
struct {
void (*tlb_flush)(struct radeon_device *rdev);
void (*set_page)(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
- void (*set_page)(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+ void (*copy_pages)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+ void (*write_pages)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+ void (*set_pages)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+ void (*pad_ib)(struct radeon_ib *ib);
} vm;
/* ring specific callbacks */
struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
@@ -2294,10 +2320,12 @@ struct radeon_device {
const struct firmware *mc_fw; /* NI MC firmware */
const struct firmware *ce_fw; /* SI CE firmware */
const struct firmware *mec_fw; /* CIK MEC firmware */
+ const struct firmware *mec2_fw; /* KV MEC2 firmware */
const struct firmware *sdma_fw; /* CIK SDMA firmware */
const struct firmware *smc_fw; /* SMC firmware */
const struct firmware *uvd_fw; /* UVD firmware */
const struct firmware *vce_fw; /* VCE firmware */
+ bool new_fw;
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
@@ -2337,6 +2365,11 @@ struct radeon_device {
struct dev_pm_domain vga_pm_domain;
bool have_disp_power_ref;
+ u32 px_quirk_flags;
+
+ /* tracking pinned memory */
+ u64 vram_pin_size;
+ u64 gart_pin_size;
};
bool radeon_is_px(struct drm_device *dev);
@@ -2347,10 +2380,42 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
- bool always_indirect);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
- bool always_indirect);
+#define RADEON_MIN_MMIO_SIZE 0x10000
+
+static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect)
+{
+ /* The MMIO size is at least 64KB, which allows the if to be optimized out. */
+ if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
+ return readl(((void __iomem *)rdev->rmmio) + reg);
+ else {
+ unsigned long flags;
+ uint32_t ret;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+ return ret;
+ }
+}
+
+static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect)
+{
+ if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
+ writel(v, ((void __iomem *)rdev->rmmio) + reg);
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+ }
+}
+
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
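Inlining the MMIO accessors above pays off because
RADEON_MIN_MMIO_SIZE makes the bounds check a compile-time truth for
constant register offsets below 64KB, so the common case reduces to a
bare readl()/writel() with no branch or indirect path. For example,
assuming WREG32 expands to r100_mm_wreg(rdev, reg, v, false) as
elsewhere in the driver:

    WREG32(RADEON_MM_INDEX, 0);     /* reg = 0x0000 < 0x10000: the
                                     * indirect branch is optimized out
                                     * and this inlines to writel() */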
@@ -2704,10 +2769,13 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
+#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
@@ -2833,9 +2901,12 @@ void radeon_vm_fence(struct radeon_device *rdev,
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_update_page_directory(struct radeon_device *rdev,
struct radeon_vm *vm);
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+ struct radeon_vm *vm);
+int radeon_vm_clear_invalids(struct radeon_device *rdev,
+ struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
+ struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
@@ -2848,8 +2919,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
uint64_t offset,
uint32_t flags);
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va);
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va);
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 34b9aa9e3c06..eeeeabe09758 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -185,6 +185,7 @@ static struct radeon_asic_ring r100_gfx_ring = {
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
+ .hdp_flush = &r100_ring_hdp_flush,
};
static struct radeon_asic r100_asic = {
@@ -194,7 +195,7 @@ static struct radeon_asic r100_asic = {
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
@@ -260,7 +261,7 @@ static struct radeon_asic r200_asic = {
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
@@ -331,6 +332,7 @@ static struct radeon_asic_ring r300_gfx_ring = {
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
+ .hdp_flush = &r100_ring_hdp_flush,
};
static struct radeon_asic r300_asic = {
@@ -340,7 +342,7 @@ static struct radeon_asic r300_asic = {
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
@@ -406,7 +408,7 @@ static struct radeon_asic r300_asic_pcie = {
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
@@ -472,7 +474,7 @@ static struct radeon_asic r420_asic = {
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
@@ -538,7 +540,7 @@ static struct radeon_asic rs400_asic = {
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
@@ -604,7 +606,7 @@ static struct radeon_asic rs600_asic = {
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
@@ -672,7 +674,7 @@ static struct radeon_asic rs690_asic = {
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
@@ -740,7 +742,7 @@ static struct radeon_asic rv515_asic = {
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
@@ -806,7 +808,7 @@ static struct radeon_asic r520_asic = {
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
@@ -898,7 +900,7 @@ static struct radeon_asic r600_asic = {
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -970,7 +972,7 @@ static struct radeon_asic rv6xx_asic = {
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -1060,7 +1062,7 @@ static struct radeon_asic rs780_asic = {
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -1163,7 +1165,7 @@ static struct radeon_asic rv770_asic = {
.resume = &rv770_resume,
.asic_reset = &r600_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
@@ -1281,7 +1283,7 @@ static struct radeon_asic evergreen_asic = {
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
@@ -1373,7 +1375,7 @@ static struct radeon_asic sumo_asic = {
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -1464,7 +1466,7 @@ static struct radeon_asic btc_asic = {
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
@@ -1599,7 +1601,7 @@ static struct radeon_asic cayman_asic = {
.resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
@@ -1611,7 +1613,10 @@ static struct radeon_asic cayman_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .set_page = &cayman_dma_vm_set_page,
+ .copy_pages = &cayman_dma_vm_copy_pages,
+ .write_pages = &cayman_dma_vm_write_pages,
+ .set_pages = &cayman_dma_vm_set_pages,
+ .pad_ib = &cayman_dma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1699,7 +1704,7 @@ static struct radeon_asic trinity_asic = {
.resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -1711,7 +1716,10 @@ static struct radeon_asic trinity_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .set_page = &cayman_dma_vm_set_page,
+ .copy_pages = &cayman_dma_vm_copy_pages,
+ .write_pages = &cayman_dma_vm_write_pages,
+ .set_pages = &cayman_dma_vm_set_pages,
+ .pad_ib = &cayman_dma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1829,7 +1837,7 @@ static struct radeon_asic si_asic = {
.resume = &si_resume,
.asic_reset = &si_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &si_get_xclk,
@@ -1841,7 +1849,10 @@ static struct radeon_asic si_asic = {
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .set_page = &si_dma_vm_set_page,
+ .copy_pages = &si_dma_vm_copy_pages,
+ .write_pages = &si_dma_vm_write_pages,
+ .set_pages = &si_dma_vm_set_pages,
+ .pad_ib = &cayman_dma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -1987,7 +1998,7 @@ static struct radeon_asic ci_asic = {
.resume = &cik_resume,
.asic_reset = &cik_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = &r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
@@ -1999,7 +2010,10 @@ static struct radeon_asic ci_asic = {
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
- .set_page = &cik_sdma_vm_set_page,
+ .copy_pages = &cik_sdma_vm_copy_pages,
+ .write_pages = &cik_sdma_vm_write_pages,
+ .set_pages = &cik_sdma_vm_set_pages,
+ .pad_ib = &cik_sdma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2091,7 +2105,7 @@ static struct radeon_asic kv_asic = {
.resume = &cik_resume,
.asic_reset = &cik_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = &r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
@@ -2103,7 +2117,10 @@ static struct radeon_asic kv_asic = {
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
- .set_page = &cik_sdma_vm_set_page,
+ .copy_pages = &cik_sdma_vm_copy_pages,
+ .write_pages = &cik_sdma_vm_write_pages,
+ .set_pages = &cik_sdma_vm_set_pages,
+ .pad_ib = &cik_sdma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2457,7 +2474,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2476,7 +2493,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CP_LS |
@@ -2502,7 +2519,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2530,7 +2547,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 01e7c0ad8f01..275a5dc01780 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -68,7 +68,7 @@ int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
@@ -148,7 +148,8 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void r100_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
-
+void r100_ring_hdp_flush(struct radeon_device *rdev,
+ struct radeon_ring *ring);
/*
* r200,rv250,rs300,rv280
*/
@@ -173,7 +174,7 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -209,7 +210,7 @@ extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
@@ -233,7 +234,7 @@ void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
@@ -351,7 +352,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
-extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+extern void r600_mmio_hdp_flush(struct radeon_device *rdev);
extern bool r600_gui_idle(struct radeon_device *rdev);
extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
@@ -606,11 +607,22 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -693,11 +705,22 @@ int si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
-void si_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -771,11 +794,23 @@ int cik_irq_process(struct radeon_device *rdev);
int cik_vm_init(struct radeon_device *rdev);
void cik_vm_fini(struct radeon_device *rdev);
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
+
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
u32 cik_gfx_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 173f378428a9..92b2d8dd4735 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1963,7 +1963,7 @@ static const char *thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
- "lm64",
+ "lm63", /* lm64 */
"f75375",
"asc7xxx",
};
@@ -1974,7 +1974,7 @@ static const char *pp_lib_thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
- "lm64",
+ "lm63", /* lm64 */
"f75375",
"RV6xx",
"RV770",
@@ -3236,6 +3236,41 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
return 0;
}
+union get_voltage_info {
+ struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
+ struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
+};
+
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+ u16 virtual_voltage_id,
+ u16 *voltage)
+{
+ int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
+ u32 entry_id;
+ u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
+ union get_voltage_info args;
+
+ for (entry_id = 0; entry_id < count; entry_id++) {
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
+ virtual_voltage_id)
+ break;
+ }
+
+ if (entry_id >= count)
+ return -EINVAL;
+
+ args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
+ args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+ args.in.ulSCLKFreq =
+ cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
+
+ return 0;
+}
+
int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
u16 voltage_level, u8 voltage_type,
u32 *gpio_value, u32 *gpio_mask)
@@ -3397,6 +3432,50 @@ radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
return false;
}
+int radeon_atom_get_svi2_info(struct radeon_device *rdev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id)
+{
+ int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+ u8 frev, crev;
+ u16 data_offset, size;
+ union voltage_object_info *voltage_info;
+ union voltage_object *voltage_object = NULL;
+
+ if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ voltage_info = (union voltage_object_info *)
+ (rdev->mode_info.atom_context->bios + data_offset);
+
+ switch (frev) {
+ case 3:
+ switch (crev) {
+ case 1:
+ voltage_object = (union voltage_object *)
+ atom_lookup_voltage_object_v3(&voltage_info->v3,
+ voltage_type,
+ VOLTAGE_OBJ_SVID2);
+ if (voltage_object) {
+ *svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
+ *svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("unknown voltage object table\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("unknown voltage object table\n");
+ return -EINVAL;
+ }
+
+ }
+ return 0;
+}
+
int radeon_atom_get_max_voltage(struct radeon_device *rdev,
u8 voltage_type, u16 *max_voltage)
{
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 6e05a2e75a46..69f5695bdab9 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -97,7 +97,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
int time;
n = RADEON_BENCHMARK_ITERATIONS;
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
if (r) {
goto out_cleanup;
}
@@ -109,7 +109,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 8137b7a34696..300c4b3d4669 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -107,7 +107,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -115,7 +115,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -124,7 +124,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
- drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -148,7 +148,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
break;
}
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* hdmi deep color only implemented on DCE4+ */
if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) {
DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n",
@@ -197,10 +197,19 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
connector->name, bpc);
}
}
+ else if (bpc > 8) {
+ /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
+ DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
+ connector->name);
+ bpc = 8;
+ }
}
- if ((radeon_deep_color == 0) && (bpc > 8))
+ if ((radeon_deep_color == 0) && (bpc > 8)) {
+ DRM_DEBUG("%s: Deep color disabled. Set radeon module param deep_color=1 to enable.\n",
+ connector->name);
bpc = 8;
+ }
DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
connector->name, connector->display_info.bpc, bpc);
@@ -216,7 +225,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
struct drm_encoder *best_encoder = NULL;
struct drm_encoder *encoder = NULL;
struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
- struct drm_mode_object *obj;
bool connected;
int i;
@@ -226,14 +234,11 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev,
- connector->encoder_ids[i],
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev,
+ connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
-
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -249,7 +254,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
{
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
int i;
@@ -257,34 +261,134 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector,
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
if (encoder->encoder_type == encoder_type)
return encoder;
}
return NULL;
}
+struct edid *radeon_connector_edid(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_property_blob *edid_blob = connector->edid_blob_ptr;
+
+ if (radeon_connector->edid) {
+ return radeon_connector->edid;
+ } else if (edid_blob) {
+ struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
+ if (edid)
+ radeon_connector->edid = edid;
+ }
+ return radeon_connector->edid;
+}
+
+static void radeon_connector_get_edid(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->edid)
+ return;
+
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+ if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+ ENCODER_OBJECT_ID_NONE) &&
+ radeon_connector->ddc_bus->has_aux) {
+ radeon_connector->edid = drm_get_edid(connector,
+ &radeon_connector->ddc_bus->aux.ddc);
+ } else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+ dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
+ radeon_connector->ddc_bus->has_aux)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->aux.ddc);
+ else if (radeon_connector->ddc_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ } else if (radeon_connector->ddc_bus) {
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ }
+
+ if (!radeon_connector->edid) {
+ if (rdev->is_atom_bios) {
+ /* some laptops provide a hardcoded edid in rom for LCDs */
+ if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ } else {
+ /* some servers provide a hardcoded edid in rom for KVMs */
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ }
+ }
+}
+
+static void radeon_connector_free_edid(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+}
+
+static int radeon_ddc_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ if (radeon_connector->edid) {
+ drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
+ ret = drm_add_edid_modes(connector, radeon_connector->edid);
+ drm_edid_to_eld(connector, radeon_connector->edid);
+ return ret;
+ }
+ drm_mode_connector_update_edid_property(connector, NULL);
+ return 0;
+}
+
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
+static void radeon_get_native_mode(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ struct radeon_encoder *radeon_encoder;
+
+ if (encoder == NULL)
+ return;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (!list_empty(&connector->probed_modes)) {
+ struct drm_display_mode *preferred_mode =
+ list_first_entry(&connector->probed_modes,
+ struct drm_display_mode, head);
+
+ radeon_encoder->native_mode = *preferred_mode;
+ } else {
+ radeon_encoder->native_mode.clock = 0;
+ }
+}
+
/*
* radeon_connector_analog_encoder_conflict_solve
* - search for other connectors sharing this encoder
@@ -585,6 +689,35 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
radeon_property_change_mode(&radeon_encoder->base);
}
+ if (property == dev->mode_config.scaling_mode_property) {
+ enum radeon_rmx_type rmx_type;
+
+ if (connector->encoder)
+ radeon_encoder = to_radeon_encoder(connector->encoder);
+ else {
+ struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
+ }
+
+ switch (val) {
+ default:
+ case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
+ case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
+ case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+ case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+ }
+ if (radeon_encoder->rmx_type == rmx_type)
+ return 0;
+
+ if ((rmx_type != DRM_MODE_SCALE_NONE) &&
+ (radeon_encoder->native_mode.clock == 0))
+ return 0;
+
+ radeon_encoder->rmx_type = rmx_type;
+
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+
return 0;
}
@@ -625,22 +758,20 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
static int radeon_lvds_get_modes(struct drm_connector *connector)
{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
int ret = 0;
struct drm_display_mode *mode;
- if (radeon_connector->ddc_bus) {
- ret = radeon_ddc_get_modes(radeon_connector);
- if (ret > 0) {
- encoder = radeon_best_single_encoder(connector);
- if (encoder) {
- radeon_fixup_lvds_native_mode(encoder, connector);
- /* add scaled modes */
- radeon_add_common_modes(encoder, connector);
- }
- return ret;
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
+ if (ret > 0) {
+ encoder = radeon_best_single_encoder(connector);
+ if (encoder) {
+ radeon_fixup_lvds_native_mode(encoder, connector);
+ /* add scaled modes */
+ radeon_add_common_modes(encoder, connector);
}
+ return ret;
}
encoder = radeon_best_single_encoder(connector);
@@ -715,16 +846,9 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
}
/* check for edid as well */
+ radeon_connector_get_edid(connector);
if (radeon_connector->edid)
ret = connector_status_connected;
- else {
- if (radeon_connector->ddc_bus) {
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- if (radeon_connector->edid)
- ret = connector_status_connected;
- }
- }
/* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret);
@@ -737,8 +861,7 @@ static void radeon_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (radeon_connector->edid)
- kfree(radeon_connector->edid);
+ radeon_connector_free_edid(connector);
kfree(radeon_connector->con_priv);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@@ -797,10 +920,12 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
static int radeon_vga_get_modes(struct drm_connector *connector)
{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
+
+ radeon_get_native_mode(connector);
return ret;
}
@@ -843,28 +968,26 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
- radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_connector_free_edid(connector);
+ radeon_connector_get_edid(connector);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
ret = connector_status_connected;
} else {
- radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ radeon_connector->use_digital =
+ !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
+ radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
- } else
+ } else {
ret = connector_status_connected;
+ }
}
} else {
@@ -999,15 +1122,6 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
.set_property = radeon_connector_set_property,
};
-static int radeon_dvi_get_modes(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- int ret;
-
- ret = radeon_ddc_get_modes(radeon_connector);
- return ret;
-}
-
static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -1048,7 +1162,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_mode_object *obj;
int i, r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
@@ -1066,18 +1179,16 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
- radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_connector_free_edid(connector);
+ radeon_connector_get_edid(connector);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
/* rs690 seems to have a problem with connectors not existing and always
* returning a block of 0's. If we see this, just stop polling on this output */
- if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
+ if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) &&
+ radeon_connector->base.null_edid_counter) {
ret = connector_status_disconnected;
DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n",
connector->name);
@@ -1087,18 +1198,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
broken_edid = true; /* defer use_digital to later */
}
} else {
- radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ radeon_connector->use_digital =
+ !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
+ radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
- } else
+ } else {
ret = connector_status_connected;
-
+ }
/* This gets complicated. We have boards with VGA + HDMI with a
* shared DDC line and we have boards with DVI-D + HDMI with a shared
* DDC line. The latter is more complex because with DVI<->HDMI adapters
@@ -1118,8 +1229,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
/* hpd is our only option in this case */
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
+ radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
}
}
@@ -1153,14 +1263,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev,
- connector->encoder_ids[i],
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev,
+ connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
-
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1225,19 +1332,16 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
-
if (radeon_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1252,13 +1356,8 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- if (enc_id) {
- obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
@@ -1291,7 +1390,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
- else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* HDMI 1.3+ supports max clock of 340 MHz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
@@ -1310,7 +1409,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
}
static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
- .get_modes = radeon_dvi_get_modes,
+ .get_modes = radeon_vga_get_modes,
.mode_valid = radeon_dvi_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
@@ -1339,7 +1438,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
@@ -1350,7 +1450,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
}
if (ret > 0) {
@@ -1383,7 +1484,10 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
+
+ radeon_get_native_mode(connector);
}
return ret;
@@ -1391,7 +1495,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
{
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
@@ -1400,11 +1503,10 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
@@ -1419,9 +1521,8 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
return ENCODER_OBJECT_ID_NONE;
}
-bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
+static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
{
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
@@ -1431,11 +1532,10 @@ bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
@@ -1478,10 +1578,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
goto out;
}
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
+ radeon_connector_free_edid(connector);
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
@@ -1587,7 +1684,7 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
(radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return radeon_dp_mode_valid_helper(connector, mode);
} else {
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* HDMI 1.3+ supports max clock of 340 MHz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
@@ -1747,6 +1844,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1768,6 +1868,10 @@ radeon_add_atom_connector(struct drm_device *dev,
0);
drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
+
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
@@ -1817,6 +1921,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
@@ -1835,6 +1943,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->interlace_allowed = true;
@@ -1868,17 +1980,18 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
- if (ASIC_IS_AVIVO(rdev)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
- }
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
@@ -1918,17 +2031,18 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
- if (ASIC_IS_AVIVO(rdev)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
- }
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1965,18 +2079,18 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
- if (ASIC_IS_AVIVO(rdev)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
-
- }
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 71a143461478..ee712c199b25 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
struct radeon_vm *vm)
{
struct radeon_device *rdev = p->rdev;
+ struct radeon_bo_va *bo_va;
int i, r;
r = radeon_vm_update_page_directory(rdev, vm);
if (r)
return r;
- r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+ r = radeon_vm_clear_freed(rdev, vm);
+ if (r)
+ return r;
+
+ if (vm->ib_bo_va == NULL) {
+ DRM_ERROR("Tmp BO not in VM!\n");
+ return -EINVAL;
+ }
+
+ r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
&rdev->ring_tmp_bo.bo->tbo.mem);
if (r)
return r;
@@ -480,11 +490,18 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
continue;
bo = p->relocs[i].robj;
- r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+ bo_va = radeon_vm_bo_find(vm, bo);
+ if (bo_va == NULL) {
+ dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+ return -EINVAL;
+ }
+
+ r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
if (r)
return r;
}
- return 0;
+
+ return radeon_vm_clear_invalids(rdev, vm);
}
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 03686fab842d..c8ea050c8fa4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,6 +103,31 @@ static const char radeon_family_name[][16] = {
"LAST",
};
+#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
+#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
+
+struct radeon_px_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 px_quirk_flags;
+};
+
+static struct radeon_px_quirk radeon_px_quirk_list[] = {
+ /* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
+ * https://bugzilla.kernel.org/show_bug.cgi?id=74551
+ */
+ { PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
+ * https://bugzilla.kernel.org/show_bug.cgi?id=51381
+ */
+ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
+ /* macbook pro 8.2 */
+ { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
+ { 0, 0, 0, 0, 0 },
+};
+
bool radeon_is_px(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -112,6 +137,26 @@ bool radeon_is_px(struct drm_device *dev)
return false;
}
+static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
+{
+ struct radeon_px_quirk *p = radeon_px_quirk_list;
+
+ /* Apply PX quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ rdev->px_quirk_flags = p->px_quirk_flags;
+ break;
+ }
+ ++p;
+ }
+
+ if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
+ rdev->flags &= ~RADEON_IS_PX;
+}
+
/**
* radeon_program_register_sequence - program an array of registers.
*
@@ -385,7 +430,8 @@ int radeon_wb_init(struct radeon_device *rdev)
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
+ RADEON_GEM_DOMAIN_GTT, 0, NULL,
+ &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
@@ -1056,61 +1102,54 @@ static void radeon_check_arguments(struct radeon_device *rdev)
if (!radeon_check_pot_argument(radeon_vm_size)) {
dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
- if (radeon_vm_size < 4) {
- dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+ if (radeon_vm_size < 1) {
+ dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
/*
* Max GPUVM size for Cayman, SI and CI are 40 bits.
*/
- if (radeon_vm_size > 1024*1024) {
- dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+ if (radeon_vm_size > 1024) {
+ dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
- if (radeon_vm_block_size < 9) {
- dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+ if (radeon_vm_block_size == -1) {
+
+ /* Total bits covered by PD + PTs */
+ unsigned bits = ilog2(radeon_vm_size) + 18;
+
+ /* Make sure the PD is 4K in size up to 8GB address space.
+ Above that, split equally between PD and PTs */
+ if (radeon_vm_size <= 8)
+ radeon_vm_block_size = bits - 9;
+ else
+ radeon_vm_block_size = (bits + 3) / 2;
+
+ } else if (radeon_vm_block_size < 9) {
+ dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
if (radeon_vm_block_size > 24 ||
- radeon_vm_size < (1ull << radeon_vm_block_size)) {
- dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+ (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
+ dev_warn(rdev->dev, "VM page table size (%d) too large\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
}
/**
- * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
- * needed for waking up.
- *
- * @pdev: pci dev pointer
- */
-static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
-{
-
- /* 6600m in a macbook pro */
- if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
- pdev->subsystem_device == 0x00e2) {
- printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
- return true;
- }
-
- return false;
-}
-
-/**
* radeon_switcheroo_set_state - set switcheroo state
*
* @pdev: pci dev pointer
@@ -1122,6 +1161,7 @@ static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ struct radeon_device *rdev = dev->dev_private;
if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
return;
@@ -1133,7 +1173,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
+ if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
dev->pdev->d3_delay = 20;
radeon_resume_kms(dev, true, true);
@@ -1238,7 +1278,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Adjust VM size here.
* Max GPUVM size for cayman+ is 40 bits.
*/
- rdev->vm_manager.max_pfn = radeon_vm_size << 8;
+ rdev->vm_manager.max_pfn = radeon_vm_size << 18;
/* Set asic functions */
r = radeon_asic_init(rdev);
@@ -1337,6 +1377,9 @@ int radeon_device_init(struct radeon_device *rdev,
if (rdev->rio_mem == NULL)
DRM_ERROR("Unable to find PCI I/O BAR\n");
+ if (rdev->flags & RADEON_IS_PX)
+ radeon_device_handle_px_quirks(rdev);
+
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 13896edcf0b6..3fdf87318069 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -293,6 +293,18 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
if (radeon_crtc == NULL)
return;
+ /* Skip the pageflip completion check below (based on polling) on
+ * asics which reliably support hw pageflip completion irqs. pflip
+ * irqs are a reliable and race-free method of handling pageflip
+ * completion detection. A use_pflipirq module parameter < 2 allows
+ * overriding this in case of asics with faulty pflip irqs.
+ * A module parameter of 0 would use only this polling-based path,
+ * a parameter of 1 would use the pflip irq only as a backup to this
+ * path, as in Linux 3.16.
+ */
+ if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
+ return;
+
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
@@ -366,7 +378,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
- radeon_fence_unref(&work->fence);
radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}
@@ -386,51 +397,108 @@ static void radeon_flip_work_func(struct work_struct *__work)
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &radeon_crtc->base;
- struct drm_framebuffer *fb = work->fb;
-
- uint32_t tiling_flags, pitch_pixels;
- uint64_t base;
-
unsigned long flags;
int r;
down_read(&rdev->exclusive_lock);
- while (work->fence) {
+ if (work->fence) {
r = radeon_fence_wait(work->fence, false);
if (r == -EDEADLK) {
up_read(&rdev->exclusive_lock);
r = radeon_gpu_reset(rdev);
down_read(&rdev->exclusive_lock);
}
+ if (r)
+ DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
+
+ /* We continue with the page flip even if we failed to wait on
+ * the fence, otherwise the DRM core and userspace will be
+ * confused about which BO the CRTC is scanning out
+ */
- if (r) {
- DRM_ERROR("failed to wait on page flip fence (%d)!\n",
- r);
- goto cleanup;
- } else
- radeon_fence_unref(&work->fence);
+ radeon_fence_unref(&work->fence);
}
+ /* We borrow the event spin lock for protecting flip_status */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ /* set the proper interrupt */
+ radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+
+ /* do the flip (mmio) */
+ radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+
+ radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ up_read(&rdev->exclusive_lock);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *old_radeon_fb;
+ struct radeon_framebuffer *new_radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_flip_work *work;
+ struct radeon_bo *new_rbo;
+ uint32_t tiling_flags, pitch_pixels;
+ uint64_t base;
+ unsigned long flags;
+ int r;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ INIT_WORK(&work->flip_work, radeon_flip_work_func);
+ INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+ work->rdev = rdev;
+ work->crtc_id = radeon_crtc->crtc_id;
+ work->event = event;
+
+ /* schedule unpin of the old buffer */
+ old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ obj = old_radeon_fb->obj;
+
+ /* take a reference to the old object */
+ drm_gem_object_reference(obj);
+ work->old_rbo = gem_to_radeon_bo(obj);
+
+ new_radeon_fb = to_radeon_framebuffer(fb);
+ obj = new_radeon_fb->obj;
+ new_rbo = gem_to_radeon_bo(obj);
+
+ spin_lock(&new_rbo->tbo.bdev->fence_lock);
+ if (new_rbo->tbo.sync_obj)
+ work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
+ spin_unlock(&new_rbo->tbo.bdev->fence_lock);
+
/* pin the new buffer */
- DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
- work->old_rbo, work->new_rbo);
+ DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
+ work->old_rbo, new_rbo);
- r = radeon_bo_reserve(work->new_rbo, false);
+ r = radeon_bo_reserve(new_rbo, false);
if (unlikely(r != 0)) {
DRM_ERROR("failed to reserve new rbo buffer before flip\n");
goto cleanup;
}
/* Only 27 bit offset for legacy CRTC */
- r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
if (unlikely(r != 0)) {
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_unreserve(new_rbo);
r = -EINVAL;
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(new_rbo);
if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
@@ -467,6 +535,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
}
base &= ~7;
}
+ work->base = base;
r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
if (r) {
@@ -477,100 +546,42 @@ static void radeon_flip_work_func(struct work_struct *__work)
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- /* set the proper interrupt */
- radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+ if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ r = -EBUSY;
+ goto vblank_cleanup;
+ }
+ radeon_crtc->flip_status = RADEON_FLIP_PENDING;
+ radeon_crtc->flip_work = work;
- /* do the flip (mmio) */
- radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
+ /* update crtc fb */
+ crtc->primary->fb = fb;
- radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- up_read(&rdev->exclusive_lock);
- return;
+ queue_work(radeon_crtc->flip_queue, &work->flip_work);
+ return 0;
+
+vblank_cleanup:
+ drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
pflip_cleanup:
- if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) {
+ if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
goto cleanup;
}
- if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) {
+ if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
DRM_ERROR("failed to unpin new rbo in error path\n");
}
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_unreserve(new_rbo);
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
radeon_fence_unref(&work->fence);
kfree(work);
- up_read(&rdev->exclusive_lock);
-}
-
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
-{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_framebuffer *old_radeon_fb;
- struct radeon_framebuffer *new_radeon_fb;
- struct drm_gem_object *obj;
- struct radeon_flip_work *work;
- unsigned long flags;
-
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (work == NULL)
- return -ENOMEM;
-
- INIT_WORK(&work->flip_work, radeon_flip_work_func);
- INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
-
- work->rdev = rdev;
- work->crtc_id = radeon_crtc->crtc_id;
- work->fb = fb;
- work->event = event;
-
- /* schedule unpin of the old buffer */
- old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
- obj = old_radeon_fb->obj;
-
- /* take a reference to the old object */
- drm_gem_object_reference(obj);
- work->old_rbo = gem_to_radeon_bo(obj);
-
- new_radeon_fb = to_radeon_framebuffer(fb);
- obj = new_radeon_fb->obj;
- work->new_rbo = gem_to_radeon_bo(obj);
- spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
- if (work->new_rbo->tbo.sync_obj)
- work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
- spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
-
- /* We borrow the event spin lock for protecting flip_work */
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
- if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- radeon_fence_unref(&work->fence);
- kfree(work);
- return -EBUSY;
- }
- radeon_crtc->flip_status = RADEON_FLIP_PENDING;
- radeon_crtc->flip_work = work;
-
- /* update crtc fb */
- crtc->primary->fb = fb;
-
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
- queue_work(radeon_crtc->flip_queue, &work->flip_work);
-
- return 0;
+ return r;
}
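
Taken together, the refactored path moves the actual flip programming
out of the ioctl and into the worker. A sketch of the flow (comments
only, not driver code):

        /* ioctl: radeon_crtc_page_flip()
         *   pin new_rbo, compute work->base, drm_vblank_get()
         *   under event_lock: flip_status = RADEON_FLIP_PENDING
         *   queue_work(radeon_crtc->flip_queue, &work->flip_work)
         *
         * worker: radeon_flip_work_func()
         *   wait on work->fence (reset GPU on -EDEADLK, log and
         *   continue on other errors)
         *   under event_lock: pflip_irq_get(), radeon_page_flip(),
         *   flip_status = RADEON_FLIP_SUBMITTED
         *
         * vblank/irq: radeon_crtc_handle_vblank() -> ..._handle_flip()
         *   send the event, drm_vblank_put(), pflip_irq_put(),
         *   queue_work(flip_queue, &work->unpin_work)
         */
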
static int
@@ -824,59 +835,6 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
return ret;
}
-int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
-{
- struct drm_device *dev = radeon_connector->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- int ret = 0;
-
- /* on hw with routers, select right port */
- if (radeon_connector->router.ddc_valid)
- radeon_router_select_ddc_port(radeon_connector);
-
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
- ENCODER_OBJECT_ID_NONE) {
- if (radeon_connector->ddc_bus->has_aux)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->aux.ddc);
- } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
- struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
-
- if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
- dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
- radeon_connector->ddc_bus->has_aux)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->aux.ddc);
- else if (radeon_connector->ddc_bus && !radeon_connector->edid)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- } else {
- if (radeon_connector->ddc_bus && !radeon_connector->edid)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- }
-
- if (!radeon_connector->edid) {
- if (rdev->is_atom_bios) {
- /* some laptops provide a hardcoded edid in rom for LCDs */
- if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
- radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
- } else
- /* some servers provide a hardcoded edid in rom for KVMs */
- radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
- }
- if (radeon_connector->edid) {
- drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
- ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
- drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
- return ret;
- }
- drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
- return 0;
-}
-
/* avivo */
/**
@@ -1745,7 +1703,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
(!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
- drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
is_hdtv_mode(mode)))) {
if (radeon_encoder->underscan_hborder != 0)
radeon_crtc->h_border = radeon_encoder->underscan_hborder;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb1421369e3a..a773830c6c40 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -82,9 +82,11 @@
* 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN),
* CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
* 2.39.0 - Add INFO query for number of active CUs
+ * 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
+ * CS to GPU
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 39
+#define KMS_DRIVER_MINOR 40
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -173,9 +175,10 @@ int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
-int radeon_vm_size = 4096;
-int radeon_vm_block_size = 9;
+int radeon_vm_size = 8;
+int radeon_vm_block_size = -1;
int radeon_deep_color = 0;
+int radeon_use_pflipirq = 2;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -243,15 +246,18 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
module_param_named(hard_reset, radeon_hard_reset, int, 0444);
-MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 8GB)");
module_param_named(vm_size, radeon_vm_size, int, 0444);
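
Since vm_size is now in gigabytes, the 40-bit GPUVM limit on cayman+
caps it at 1024. A hypothetical sanity clamp (sketch only; the check
and message are illustrative, not part of this series):

        if (radeon_vm_size > 1024) {
                /* 2^40 B / 2^30 B == 1024 GiB is the most a 40-bit
                 * GPUVM can address */
                DRM_ERROR("VM size %d too large, clamping to 1024 GiB\n",
                          radeon_vm_size);
                radeon_vm_size = 1024;
        }
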
-MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
+MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, radeon_deep_color, int, 0444);
+MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
+module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index bd4959ca23aa..3c2094c25b53 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -343,7 +343,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (pixel_clock > 340000)
return true;
else
@@ -365,7 +365,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
return false;
else {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (pixel_clock > 340000)
return true;
else
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index db598d712901..94b0f2aa3d7c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -127,8 +127,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
- false, true,
- &gobj);
+ 0, true, &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
aligned_size);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 2e723651069b..a053a0779aac 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -128,7 +128,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->gart.robj);
+ 0, NULL, &rdev->gart.robj);
if (r) {
return r;
}
@@ -243,7 +243,8 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
if (rdev->gart.ptr) {
- radeon_gart_set_page(rdev, t, page_base);
+ radeon_gart_set_page(rdev, t, page_base,
+ RADEON_GART_PAGE_DUMMY);
}
page_base += RADEON_GPU_PAGE_SIZE;
}
@@ -261,13 +262,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
* @pages: number of pages to bind
* @pagelist: pages to bind
* @dma_addr: DMA addresses of pages
+ * @flags: RADEON_GART_PAGE_* flags
*
* Binds the requested pages to the gart page table
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
- int pages, struct page **pagelist, dma_addr_t *dma_addr)
+ int pages, struct page **pagelist, dma_addr_t *dma_addr,
+ uint32_t flags)
{
unsigned t;
unsigned p;
@@ -287,7 +290,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
if (rdev->gart.ptr) {
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base);
+ radeon_gart_set_page(rdev, t, page_base, flags);
page_base += RADEON_GPU_PAGE_SIZE;
}
}
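
The flag values passed through the new parameter are composed per
mapping; a sketch using the names introduced by this series
(cpu_cacheable is an illustrative stand-in for the caller's state):

        uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
                         RADEON_GART_PAGE_WRITE;
        if (cpu_cacheable)      /* e.g. ttm->caching_state == tt_cached */
                flags |= RADEON_GART_PAGE_SNOOP;
        r = radeon_gart_bind(rdev, offset, npages, pages, dma_addr, flags);
        /* unbinding instead points entries at the dummy page via
         * RADEON_GART_PAGE_DUMMY, as in radeon_gart_unbind() above */
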
@@ -298,33 +301,6 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
}
/**
- * radeon_gart_restore - bind all pages in the gart page table
- *
- * @rdev: radeon_device pointer
- *
- * Binds all pages in the gart page table (all asics).
- * Used to rebuild the gart table on device startup or resume.
- */
-void radeon_gart_restore(struct radeon_device *rdev)
-{
- int i, j, t;
- u64 page_base;
-
- if (!rdev->gart.ptr) {
- return;
- }
- for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
- page_base = rdev->gart.pages_addr[i];
- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base);
- page_base += RADEON_GPU_PAGE_SIZE;
- }
- }
- mb();
- radeon_gart_tlb_flush(rdev);
-}
-
-/**
* radeon_gart_init - init the driver info for managing the gart
*
* @rdev: radeon_device pointer
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d09650c1d720..bfd7e1b0ff3f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -40,9 +40,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
}
}
-int radeon_gem_object_create(struct radeon_device *rdev, int size,
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
int alignment, int initial_domain,
- bool discardable, bool kernel,
+ u32 flags, bool kernel,
struct drm_gem_object **obj)
{
struct radeon_bo *robj;
@@ -55,23 +55,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
alignment = PAGE_SIZE;
}
- /* maximun bo size is the minimun btw visible vram and gtt size */
- max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+ /* Maximum bo size is the unpinned gtt size since we use the gtt to
+ * handle vram to system pool migrations.
+ */
+ max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
if (size > max_size) {
- printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
- __func__, __LINE__, size >> 20, max_size >> 20);
+ DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
+ size >> 20, max_size >> 20);
return -ENOMEM;
}
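
A worked example of the new limit, with numbers assumed purely for
illustration: given a 1 GiB GTT and 96 MiB already pinned (ring
buffers, IB pool and the like),

        max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
        /* (1024 - 96) MiB = 928 MiB: any single BO must still fit in
         * unpinned GTT, since VRAM BOs may be evicted through it */
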
retry:
- r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
+ r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
+ flags, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
initial_domain |= RADEON_GEM_DOMAIN_GTT;
goto retry;
}
- DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+ DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
size, initial_domain, alignment, r);
}
return r;
@@ -208,18 +211,15 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
- unsigned i;
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
args->vram_size = rdev->mc.real_vram_size;
args->vram_visible = (u64)man->size << PAGE_SHIFT;
- if (rdev->stollen_vga_memory)
- args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
- args->vram_visible -= radeon_fbdev_total_size(rdev);
- args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
- for(i = 0; i < RADEON_NUM_RINGS; ++i)
- args->gart_size -= rdev->ring[i].ring_size;
+ args->vram_visible -= rdev->vram_pin_size;
+ args->gart_size = rdev->mc.gtt_size;
+ args->gart_size -= rdev->gart_pin_size;
+
return 0;
}
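
On the userspace side the ioctl is unchanged; only the reported values
become exact. A minimal libdrm sketch (headers as installed by libdrm,
error handling elided):

        #include <stdio.h>
        #include <xf86drm.h>
        #include <radeon_drm.h>

        struct drm_radeon_gem_info info = {0};
        if (drmCommandWriteRead(fd, DRM_RADEON_GEM_INFO,
                                &info, sizeof(info)) == 0)
                printf("vram %llu, visible %llu, gart %llu\n",
                       (unsigned long long)info.vram_size,
                       (unsigned long long)info.vram_visible,
                       (unsigned long long)info.gart_size);
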
@@ -252,8 +252,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
- args->initial_domain, false,
- false, &gobj);
+ args->initial_domain, args->flags,
+ false, &gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
@@ -358,16 +358,18 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
+ uint32_t cur_placement = 0;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
- r = radeon_bo_wait(robj, NULL, false);
- /* callback hw specific functions if any */
- if (rdev->asic->ioctl_wait_idle)
- robj->rdev->asic->ioctl_wait_idle(rdev, robj);
+ r = radeon_bo_wait(robj, &cur_placement, false);
+ /* Flush HDP cache via MMIO if necessary */
+ if (rdev->asic->mmio_hdp_flush &&
+ radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
+ robj->rdev->asic->mmio_hdp_flush(rdev);
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
@@ -461,11 +463,6 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation = RADEON_VA_RESULT_ERROR;
return -EINVAL;
}
- if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
- dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
- args->operation = RADEON_VA_RESULT_ERROR;
- return -EINVAL;
- }
switch (args->operation) {
case RADEON_VA_MAP:
@@ -499,9 +496,9 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case RADEON_VA_MAP:
- if (bo_va->soffset) {
+ if (bo_va->it.start) {
args->operation = RADEON_VA_RESULT_VA_EXIST;
- args->offset = bo_va->soffset;
+ args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
goto out;
}
r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
@@ -572,9 +569,8 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
args->size = ALIGN(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, 0,
- RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_device,
- &gobj);
+ RADEON_GEM_DOMAIN_VRAM, 0,
+ false, &gobj);
if (r)
return -ENOMEM;
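
Dumb buffers keep passing 0 for the new flags argument; a userspace
sketch of requesting a write-combined GTT BO through the bumped 2.40.0
interface (size and alignment values are arbitrary examples):

        struct drm_radeon_gem_create create = {
                .size = 1024 * 1024,
                .alignment = 4096,
                .initial_domain = RADEON_GEM_DOMAIN_GTT,
                .flags = RADEON_GEM_GTT_WC,
        };
        r = drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
                                &create, sizeof(create));
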
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
new file mode 100644
index 000000000000..65b0c213488d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ * Christian König
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+
+/*
+ * IB
+ * IBs (Indirect Buffers) are areas of GPU-accessible memory where
+ * commands are stored. You can put a pointer to the IB in the
+ * command ring and the hw will fetch the commands from the IB
+ * and execute them. Generally userspace acceleration drivers
+ * produce command buffers which are sent to the kernel and
+ * put in IBs for execution by the requested ring.
+ */
+static int radeon_debugfs_sa_init(struct radeon_device *rdev);
+
+/**
+ * radeon_ib_get - request an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the IB is associated with
+ * @ib: IB object returned
+ * @size: requested IB size
+ *
+ * Request an IB (all asics). IBs are allocated using the
+ * suballocator.
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+ struct radeon_ib *ib, struct radeon_vm *vm,
+ unsigned size)
+{
+ int r;
+
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
+ if (r) {
+ dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
+ return r;
+ }
+
+ r = radeon_semaphore_create(rdev, &ib->semaphore);
+ if (r) {
+ return r;
+ }
+
+ ib->ring = ring;
+ ib->fence = NULL;
+ ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+ ib->vm = vm;
+ if (vm) {
+ /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
+ * space and soffset is the offset inside the pool bo
+ */
+ ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+ } else {
+ ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+ }
+ ib->is_const_ib = false;
+
+ return 0;
+}
+
+/**
+ * radeon_ib_free - free an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to free
+ *
+ * Free an IB (all asics).
+ */
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+ radeon_fence_unref(&ib->fence);
+}
+
+/**
+ * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ * @const_ib: Const IB to schedule (SI only)
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine). Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will be already in cache when the draw is
+ * processed. To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE. If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
+ * to SI there was just a DE IB.
+ */
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+ struct radeon_ib *const_ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ int r = 0;
+
+ if (!ib->length_dw || !ring->ready) {
+ /* TODO: Nothing in the ib we should report. */
+ dev_err(rdev->dev, "couldn't schedule ib\n");
+ return -EINVAL;
+ }
+
+ /* 64 dwords should be enough for fence too */
+ r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
+ if (r) {
+ dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
+ return r;
+ }
+
+ /* grab a vm id if necessary */
+ if (ib->vm) {
+ struct radeon_fence *vm_id_fence;
+ vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
+ radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
+ }
+
+ /* sync with other rings */
+ r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ if (ib->vm)
+ radeon_vm_flush(rdev, ib->vm, ib->ring);
+
+ if (const_ib) {
+ radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
+ radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+ }
+ radeon_ring_ib_execute(rdev, ib->ring, ib);
+ r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+ if (const_ib) {
+ const_ib->fence = radeon_fence_ref(ib->fence);
+ }
+
+ if (ib->vm)
+ radeon_vm_fence(rdev, ib->vm, ib->fence);
+
+ radeon_ring_unlock_commit(rdev, ring);
+ return 0;
+}
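
A sketch of how an SI submission with a constant-engine IB would use
this; only the call shape is taken from above, and has_const_ib is an
illustrative stand-in for the caller's state:

        /* const_ib primes the CE caches, ib carries the actual draw */
        r = radeon_ib_schedule(rdev, &ib, has_const_ib ? &const_ib : NULL);
        /* pre-SI callers simply pass NULL for const_ib */
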
+
+/**
+ * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the suballocator to manage a pool of memory
+ * for use as IBs (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_pool_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->ib_pool_ready) {
+ return 0;
+ }
+
+ if (rdev->family >= CHIP_BONAIRE) {
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_GTT_WC);
+ } else {
+ /* Before CIK, it's better to stick to cacheable GTT due
+ * to the command stream checking
+ */
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_GTT, 0);
+ }
+ if (r) {
+ return r;
+ }
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
+ if (r) {
+ return r;
+ }
+
+ rdev->ib_pool_ready = true;
+ if (radeon_debugfs_sa_init(rdev)) {
+ dev_err(rdev->dev, "failed to register debugfs file for SA\n");
+ }
+ return 0;
+}
+
+/**
+ * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the suballocator managing the pool of memory
+ * for use as IBs (all asics).
+ */
+void radeon_ib_pool_fini(struct radeon_device *rdev)
+{
+ if (rdev->ib_pool_ready) {
+ radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
+ radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+ rdev->ib_pool_ready = false;
+ }
+}
+
+/**
+ * radeon_ib_ring_tests - test IBs on the rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Test an IB (Indirect Buffer) on each ring.
+ * If the test fails, disable the ring.
+ * Returns 0 on success, error if the primary GFX ring
+ * IB test fails.
+ */
+int radeon_ib_ring_tests(struct radeon_device *rdev)
+{
+ unsigned i;
+ int r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ring = &rdev->ring[i];
+
+ if (!ring->ready)
+ continue;
+
+ r = radeon_ib_test(rdev, i, ring);
+ if (r) {
+ ring->ready = false;
+ rdev->needs_reset = false;
+
+ if (i == RADEON_RING_TYPE_GFX_INDEX) {
+ /* oh, oh, that's really bad */
+ DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+
+ } else {
+ /* still not good, but we can live with it */
+ DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
+ return 0;
+
+}
+
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+ {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+};
+
+#endif
+
+static int radeon_debugfs_sa_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 35d931881b4b..eb7164d07985 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -254,7 +254,18 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
break;
case RADEON_INFO_ACCEL_WORKING2:
- *value = rdev->accel_working;
+ if (rdev->family == CHIP_HAWAII) {
+ if (rdev->accel_working) {
+ if (rdev->new_fw)
+ *value = 3;
+ else
+ *value = 2;
+ } else {
+ *value = 0;
+ }
+ } else {
+ *value = rdev->accel_working;
+ }
break;
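
A userspace-side sketch of consuming the Hawaii values (0 = no accel,
2 = accel with the old firmware layout, 3 = accel with the new one;
other asics still return plain accel_working):

        uint32_t accel = 0;
        struct drm_radeon_info req = {
                .request = RADEON_INFO_ACCEL_WORKING2,
                .value = (uintptr_t)&accel,
        };
        if (drmCommandWriteRead(fd, DRM_RADEON_INFO,
                                &req, sizeof(req)) == 0 && accel >= 2)
                ; /* safe to enable acceleration on Hawaii */
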
case RADEON_INFO_TILING_CONFIG:
if (rdev->family >= CHIP_BONAIRE)
@@ -579,7 +590,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
- struct radeon_bo_va *bo_va;
+ struct radeon_vm *vm;
int r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +598,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
- r = radeon_vm_init(rdev, &fpriv->vm);
+ vm = &fpriv->vm;
+ r = radeon_vm_init(rdev, vm);
if (r) {
kfree(fpriv);
return r;
@@ -596,22 +608,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
/* map the ib pool buffer read only into
* virtual address space */
- bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
- rdev->ring_tmp_bo.bo);
- r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+ vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
+ rdev->ring_tmp_bo.bo);
+ r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
+ RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
@@ -640,21 +653,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
struct radeon_fpriv *fpriv = file_priv->driver_priv;
- struct radeon_bo_va *bo_va;
+ struct radeon_vm *vm = &fpriv->vm;
int r;
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
- bo_va = radeon_vm_bo_find(&fpriv->vm,
- rdev->ring_tmp_bo.bo);
- if (bo_va)
- radeon_vm_bo_rmv(rdev, bo_va);
+ if (vm->ib_bo_va)
+ radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
}
}
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
file_priv->driver_priv = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 0592ddb0904b..e27608c29c11 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -685,10 +685,11 @@ extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
-extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
extern int radeon_get_monitor_bpc(struct drm_connector *connector);
+extern struct edid *radeon_connector_edid(struct drm_connector *connector);
+
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode);
@@ -738,7 +739,6 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
-extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 6c717b257d6d..480c87d8edc5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -46,16 +46,6 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
* function are calling it.
*/
-static void radeon_bo_clear_va(struct radeon_bo *bo)
-{
- struct radeon_bo_va *bo_va, *tmp;
-
- list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
- /* remove from all vm address space */
- radeon_vm_bo_rmv(bo->rdev, bo_va);
- }
-}
-
static void radeon_update_memory_usage(struct radeon_bo *bo,
unsigned mem_type, int sign)
{
@@ -90,7 +80,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
- radeon_bo_clear_va(bo);
+ WARN_ON(!list_empty(&bo->va));
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
@@ -114,15 +104,23 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
if (domain & RADEON_GEM_DOMAIN_GTT) {
- if (rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+ if (rbo->flags & RADEON_GEM_GTT_UC) {
+ rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
+ } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+ (rbo->rdev->flags & RADEON_IS_AGP)) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_TT;
} else {
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
}
}
if (domain & RADEON_GEM_DOMAIN_CPU) {
- if (rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+ if (rbo->flags & RADEON_GEM_GTT_UC) {
+ rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
+ } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+ rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_SYSTEM;
} else {
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
}
@@ -146,7 +144,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
- struct sg_table *sg, struct radeon_bo **bo_ptr)
+ u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
@@ -183,6 +181,12 @@ int radeon_bo_create(struct radeon_device *rdev,
bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
RADEON_GEM_DOMAIN_GTT |
RADEON_GEM_DOMAIN_CPU);
+
+ bo->flags = flags;
+ /* PCI GART is always snooped */
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
@@ -232,6 +236,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
ttm_bo_kunmap(&bo->kmap);
}
+struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
+{
+ if (bo == NULL)
+ return NULL;
+
+ ttm_bo_reference(&bo->tbo);
+ return bo;
+}
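
A minimal pairing sketch for the new helper:

        struct radeon_bo *tmp = radeon_bo_ref(bo);  /* extra TTM ref */
        /* ... tmp is safe to use here ... */
        radeon_bo_unref(&tmp);                      /* drops ref, NULLs tmp */
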
+
void radeon_bo_unref(struct radeon_bo **bo)
{
struct ttm_buffer_object *tbo;
@@ -241,9 +254,7 @@ void radeon_bo_unref(struct radeon_bo **bo)
return;
rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
- down_read(&rdev->pm.mclk_lock);
ttm_bo_unref(&tbo);
- up_read(&rdev->pm.mclk_lock);
if (tbo == NULL)
*bo = NULL;
}
@@ -292,9 +303,13 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
bo->pin_count = 1;
if (gpu_addr != NULL)
*gpu_addr = radeon_bo_gpu_offset(bo);
- }
- if (unlikely(r != 0))
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ bo->rdev->vram_pin_size += radeon_bo_size(bo);
+ else
+ bo->rdev->gart_pin_size += radeon_bo_size(bo);
+ } else {
dev_err(bo->rdev->dev, "%p pin failed\n", bo);
+ }
return r;
}
@@ -317,8 +332,14 @@ int radeon_bo_unpin(struct radeon_bo *bo)
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r != 0))
+ if (likely(r == 0)) {
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ bo->rdev->vram_pin_size -= radeon_bo_size(bo);
+ else
+ bo->rdev->gart_pin_size -= radeon_bo_size(bo);
+ } else {
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
+ }
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 5a873f31a171..98a47fdf3625 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -124,11 +124,12 @@ extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
- bool kernel, u32 domain,
+ bool kernel, u32 domain, u32 flags,
struct sg_table *sg,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
@@ -170,7 +171,8 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain);
+ unsigned size, u32 align, u32 domain,
+ u32 flags);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index e447e390d09a..23314be49480 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1303,10 +1303,6 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RS780:
case CHIP_RS880:
case CHIP_RV770:
- case CHIP_BARTS:
- case CHIP_TURKS:
- case CHIP_CAICOS:
- case CHIP_CAYMAN:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1330,6 +1326,10 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
+ case CHIP_BARTS:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ case CHIP_CAYMAN:
case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
@@ -1400,9 +1400,7 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
}
radeon_hwmon_fini(rdev);
-
- if (rdev->pm.power_state)
- kfree(rdev->pm.power_state);
+ kfree(rdev->pm.power_state);
}
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
@@ -1421,9 +1419,7 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
radeon_dpm_fini(rdev);
radeon_hwmon_fini(rdev);
-
- if (rdev->pm.power_state)
- kfree(rdev->pm.power_state);
+ kfree(rdev->pm.power_state);
}
void radeon_pm_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 20074560fc25..f7e48d329db3 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -65,7 +65,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
int ret;
ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
- RADEON_GEM_DOMAIN_GTT, sg, &bo);
+ RADEON_GEM_DOMAIN_GTT, 0, sg, &bo);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index f8050f5429e2..5b4e0cf231a0 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -26,258 +26,8 @@
* Jerome Glisse
* Christian König
*/
-#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon_reg.h"
#include "radeon.h"
-#include "atom.h"
-
-/*
- * IB
- * IBs (Indirect Buffers) and areas of GPU accessible memory where
- * commands are stored. You can put a pointer to the IB in the
- * command ring and the hw will fetch the commands from the IB
- * and execute them. Generally userspace acceleration drivers
- * produce command buffers which are send to the kernel and
- * put in IBs for execution by the requested ring.
- */
-static int radeon_debugfs_sa_init(struct radeon_device *rdev);
-
-/**
- * radeon_ib_get - request an IB (Indirect Buffer)
- *
- * @rdev: radeon_device pointer
- * @ring: ring index the IB is associated with
- * @ib: IB object returned
- * @size: requested IB size
- *
- * Request an IB (all asics). IBs are allocated using the
- * suballocator.
- * Returns 0 on success, error on failure.
- */
-int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib *ib, struct radeon_vm *vm,
- unsigned size)
-{
- int r;
-
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
- if (r) {
- dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
- return r;
- }
-
- r = radeon_semaphore_create(rdev, &ib->semaphore);
- if (r) {
- return r;
- }
-
- ib->ring = ring;
- ib->fence = NULL;
- ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
- ib->vm = vm;
- if (vm) {
- /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
- * space and soffset is the offset inside the pool bo
- */
- ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
- } else {
- ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
- }
- ib->is_const_ib = false;
-
- return 0;
-}
-
-/**
- * radeon_ib_free - free an IB (Indirect Buffer)
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to free
- *
- * Free an IB (all asics).
- */
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
- radeon_fence_unref(&ib->fence);
-}
-
-/**
- * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- * @const_ib: Const IB to schedule (SI only)
- *
- * Schedule an IB on the associated ring (all asics).
- * Returns 0 on success, error on failure.
- *
- * On SI, there are two parallel engines fed from the primary ring,
- * the CE (Constant Engine) and the DE (Drawing Engine). Since
- * resource descriptors have moved to memory, the CE allows you to
- * prime the caches while the DE is updating register state so that
- * the resource descriptors will be already in cache when the draw is
- * processed. To accomplish this, the userspace driver submits two
- * IBs, one for the CE and one for the DE. If there is a CE IB (called
- * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
- * to SI there was just a DE IB.
- */
-int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
- struct radeon_ib *const_ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
- int r = 0;
-
- if (!ib->length_dw || !ring->ready) {
- /* TODO: Nothings in the ib we should report. */
- dev_err(rdev->dev, "couldn't schedule ib\n");
- return -EINVAL;
- }
-
- /* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
- if (r) {
- dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
- return r;
- }
-
- /* grab a vm id if necessary */
- if (ib->vm) {
- struct radeon_fence *vm_id_fence;
- vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
- radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
- }
-
- /* sync with other rings */
- r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
- if (r) {
- dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- if (ib->vm)
- radeon_vm_flush(rdev, ib->vm, ib->ring);
-
- if (const_ib) {
- radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
- radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
- }
- radeon_ring_ib_execute(rdev, ib->ring, ib);
- r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
- if (r) {
- dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
- if (const_ib) {
- const_ib->fence = radeon_fence_ref(ib->fence);
- }
-
- if (ib->vm)
- radeon_vm_fence(rdev, ib->vm, ib->fence);
-
- radeon_ring_unlock_commit(rdev, ring);
- return 0;
-}
-
-/**
- * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
- *
- * @rdev: radeon_device pointer
- *
- * Initialize the suballocator to manage a pool of memory
- * for use as IBs (all asics).
- * Returns 0 on success, error on failure.
- */
-int radeon_ib_pool_init(struct radeon_device *rdev)
-{
- int r;
-
- if (rdev->ib_pool_ready) {
- return 0;
- }
- r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
- RADEON_IB_POOL_SIZE*64*1024,
- RADEON_GPU_PAGE_SIZE,
- RADEON_GEM_DOMAIN_GTT);
- if (r) {
- return r;
- }
-
- r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
- if (r) {
- return r;
- }
-
- rdev->ib_pool_ready = true;
- if (radeon_debugfs_sa_init(rdev)) {
- dev_err(rdev->dev, "failed to register debugfs file for SA\n");
- }
- return 0;
-}
-
-/**
- * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
- *
- * @rdev: radeon_device pointer
- *
- * Tear down the suballocator managing the pool of memory
- * for use as IBs (all asics).
- */
-void radeon_ib_pool_fini(struct radeon_device *rdev)
-{
- if (rdev->ib_pool_ready) {
- radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
- radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
- rdev->ib_pool_ready = false;
- }
-}
-
-/**
- * radeon_ib_ring_tests - test IBs on the rings
- *
- * @rdev: radeon_device pointer
- *
- * Test an IB (Indirect Buffer) on each ring.
- * If the test fails, disable the ring.
- * Returns 0 on success, error if the primary GFX ring
- * IB test fails.
- */
-int radeon_ib_ring_tests(struct radeon_device *rdev)
-{
- unsigned i;
- int r;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- struct radeon_ring *ring = &rdev->ring[i];
-
- if (!ring->ready)
- continue;
-
- r = radeon_ib_test(rdev, i, ring);
- if (r) {
- ring->ready = false;
- rdev->needs_reset = false;
-
- if (i == RADEON_RING_TYPE_GFX_INDEX) {
- /* oh, oh, that's really bad */
- DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
- rdev->accel_working = false;
- return r;
-
- } else {
- /* still not good, but we can live with it */
- DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
- }
- }
- }
- return 0;
-}
/*
* Rings
@@ -433,11 +183,21 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
*/
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
+ /* If we are emitting the HDP flush via the ring buffer, we need to
+ * do it before padding.
+ */
+ if (rdev->asic->ring[ring->idx]->hdp_flush)
+ rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
/* We pad to match fetch size */
while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
mb();
+ /* If we are emitting the HDP flush via MMIO, we need to do it after
+ * all CPU writes to VRAM finished.
+ */
+ if (rdev->asic->mmio_hdp_flush)
+ rdev->asic->mmio_hdp_flush(rdev);
radeon_ring_set_wptr(rdev, ring);
}
@@ -641,6 +401,8 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
+ (rdev->flags & RADEON_IS_PCIE) ?
+ RADEON_GEM_GTT_WC : 0,
NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
@@ -791,22 +553,6 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
};
-static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
-
- return 0;
-
-}
-
-static struct drm_info_list radeon_debugfs_sa_list[] = {
- {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
-};
-
#endif
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
@@ -828,12 +574,3 @@ static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ri
#endif
return 0;
}
-
-static int radeon_debugfs_sa_init(struct radeon_device *rdev)
-{
-#if defined(CONFIG_DEBUG_FS)
- return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
-#else
- return 0;
-#endif
-}
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index adcf3e2f07da..b84f97c8718c 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain)
+ unsigned size, u32 align, u32 domain, u32 flags)
{
int i, r;
@@ -65,7 +65,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
}
r = radeon_bo_create(rdev, size, align, true,
- domain, NULL, &sa_manager->bo);
+ domain, flags, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 3a13e0d1055c..5adf4207453d 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -56,13 +56,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- n -= rdev->ring[i].ring_size;
- if (rdev->wb.wb_obj)
- n -= RADEON_GPU_PAGE_SIZE;
- if (rdev->ih.ring_obj)
- n -= rdev->ih.ring_size;
+ n = rdev->mc.gtt_size - rdev->gart_pin_size;
n /= size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
@@ -73,7 +67,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
}
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &vram_obj);
+ 0, NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -93,7 +87,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
struct radeon_fence *fence = NULL;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
+ RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index f749f2c3bbdb..9db74a96ef61 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -72,8 +72,8 @@ TRACE_EVENT(radeon_vm_bo_update,
),
TP_fast_assign(
- __entry->soffset = bo_va->soffset;
- __entry->eoffset = bo_va->eoffset;
+ __entry->soffset = bo_va->it.start;
+ __entry->eoffset = bo_va->it.last + 1;
__entry->flags = bo_va->flags;
),
TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
@@ -104,6 +104,24 @@ TRACE_EVENT(radeon_vm_set_page,
__entry->flags, __entry->count)
);
+TRACE_EVENT(radeon_vm_flush,
+ TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
+ TP_ARGS(pd_addr, ring, id),
+ TP_STRUCT__entry(
+ __field(u64, pd_addr)
+ __field(u32, ring)
+ __field(u32, id)
+ ),
+
+ TP_fast_assign(
+ __entry->pd_addr = pd_addr;
+ __entry->ring = ring;
+ __entry->id = id;
+ ),
+ TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
+ __entry->pd_addr, __entry->ring, __entry->id)
+);
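
As with any TRACE_EVENT, this generates a trace_radeon_vm_flush() call
for the flush path plus a tracefs toggle (standard tracefs layout and
the vm->id field are assumed here):

        /* in the VM flush path: */
        trace_radeon_vm_flush(pd_addr, ring, vm->id);

        /* enable from userspace:
         * echo 1 > /sys/kernel/debug/tracing/events/radeon/radeon_vm_flush/enable
         */
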
+
DECLARE_EVENT_CLASS(radeon_fence_request,
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c8a8a5144ec1..72afe82a95c9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -521,6 +521,8 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct radeon_ttm_tt *gtt = (void*)ttm;
+ uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
+ RADEON_GART_PAGE_WRITE;
int r;
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
@@ -528,8 +530,10 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
- r = radeon_gart_bind(gtt->rdev, gtt->offset,
- ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+ if (ttm->caching_state == tt_cached)
+ flags |= RADEON_GART_PAGE_SNOOP;
+ r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
+ ttm->pages, gtt->ttm.dma_address, flags);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
@@ -726,7 +730,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_VRAM, 0,
NULL, &rdev->stollen_vga_memory);
if (r) {
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.c b/drivers/gpu/drm/radeon/radeon_ucode.c
new file mode 100644
index 000000000000..6beec680390c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ucode.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_ucode.h"
+
+static void radeon_ucode_print_common_hdr(const struct common_firmware_header *hdr)
+{
+ DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
+ DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
+ DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
+ DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
+ DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
+ DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
+ DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
+ DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
+ DRM_DEBUG("ucode_array_offset_bytes: %u\n",
+ le32_to_cpu(hdr->ucode_array_offset_bytes));
+ DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
+}
+
+void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("MC\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct mc_firmware_header_v1_0 *mc_hdr =
+ container_of(hdr, struct mc_firmware_header_v1_0, header);
+
+ DRM_DEBUG("io_debug_size_bytes: %u\n",
+ le32_to_cpu(mc_hdr->io_debug_size_bytes));
+ DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
+ le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
+ } else {
+ DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("SMC\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct smc_firmware_header_v1_0 *smc_hdr =
+ container_of(hdr, struct smc_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
+ } else {
+ DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("GFX\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct gfx_firmware_header_v1_0 *gfx_hdr =
+ container_of(hdr, struct gfx_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(gfx_hdr->ucode_feature_version));
+ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
+ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
+ } else {
+ DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("RLC\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct rlc_firmware_header_v1_0 *rlc_hdr =
+ container_of(hdr, struct rlc_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr->ucode_feature_version));
+ DRM_DEBUG("save_and_restore_offset: %u\n",
+ le32_to_cpu(rlc_hdr->save_and_restore_offset));
+ DRM_DEBUG("clear_state_descriptor_offset: %u\n",
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
+ DRM_DEBUG("avail_scratch_ram_locations: %u\n",
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
+ DRM_DEBUG("master_pkt_description_offset: %u\n",
+ le32_to_cpu(rlc_hdr->master_pkt_description_offset));
+ } else {
+ DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("SDMA\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct sdma_firmware_header_v1_0 *sdma_hdr =
+ container_of(hdr, struct sdma_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(sdma_hdr->ucode_feature_version));
+ DRM_DEBUG("ucode_change_version: %u\n",
+ le32_to_cpu(sdma_hdr->ucode_change_version));
+ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
+ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
+ } else {
+ DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
+ version_major, version_minor);
+ }
+}
+
+int radeon_ucode_validate(const struct firmware *fw)
+{
+ const struct common_firmware_header *hdr =
+ (const struct common_firmware_header *)fw->data;
+
+ if (fw->size == le32_to_cpu(hdr->size_bytes))
+ return 0;
+
+ return -EINVAL;
+}
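A consumer of this file would request the blob, let radeon_ucode_validate() cross-check the advertised size against what the firmware loader actually read, and dump the header before touching the payload. A sketch under the usual request_firmware() flow; example_load_mc and the tahiti path are illustrative only:

/* Sketch: load and sanity-check a headered MC blob (unwind trimmed). */
static int example_load_mc(struct radeon_device *rdev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	int err;

	err = request_firmware(&rdev->mc_fw, "radeon/tahiti_mc.bin", rdev->dev);
	if (err)
		return err;

	err = radeon_ucode_validate(rdev->mc_fw);
	if (err)
		return err;	/* header size_bytes disagrees with fw->size */

	hdr = (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
	radeon_ucode_print_mc_hdr(&hdr->header);
	return 0;
}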
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index 4e7c3269b183..dc4576e4d8ad 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -153,4 +153,75 @@
#define HAWAII_SMC_UCODE_START 0x20000
#define HAWAII_SMC_UCODE_SIZE 0x1FDEC
+struct common_firmware_header {
+ uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
+ uint32_t header_size_bytes; /* size of just the header in bytes */
+ uint16_t header_version_major; /* header version */
+ uint16_t header_version_minor; /* header version */
+ uint16_t ip_version_major; /* IP version */
+ uint16_t ip_version_minor; /* IP version */
+ uint32_t ucode_version;
+ uint32_t ucode_size_bytes; /* size of ucode in bytes */
+ uint32_t ucode_array_offset_bytes; /* payload offset from the start of the header */
+ uint32_t crc32; /* crc32 checksum of the payload */
+};
+
+/* version_major=1, version_minor=0 */
+struct mc_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t io_debug_size_bytes; /* size of debug array in dwords */
+ uint32_t io_debug_array_offset_bytes; /* payload offset from the start of the header */
+};
+
+/* version_major=1, version_minor=0 */
+struct smc_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_start_addr;
+};
+
+/* version_major=1, version_minor=0 */
+struct gfx_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t jt_offset; /* jt location */
+ uint32_t jt_size; /* size of jt */
+};
+
+/* version_major=1, version_minor=0 */
+struct rlc_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t save_and_restore_offset;
+ uint32_t clear_state_descriptor_offset;
+ uint32_t avail_scratch_ram_locations;
+ uint32_t master_pkt_description_offset;
+};
+
+/* version_major=1, version_minor=0 */
+struct sdma_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t ucode_change_version;
+ uint32_t jt_offset; /* jt location */
+ uint32_t jt_size; /* size of jt */
+};
+
+/* header is fixed size */
+union radeon_firmware_header {
+ struct common_firmware_header common;
+ struct mc_firmware_header_v1_0 mc;
+ struct smc_firmware_header_v1_0 smc;
+ struct gfx_firmware_header_v1_0 gfx;
+ struct rlc_firmware_header_v1_0 rlc;
+ struct sdma_firmware_header_v1_0 sdma;
+ uint8_t raw[0x100];
+};
+
+void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
+int radeon_ucode_validate(const struct firmware *fw);
+
#endif
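Since each blob carries its own offsets and sizes, consumers derive the payload location from the common header instead of per-chip size constants. The access pattern, in brief (the locals are illustrative):

/* All header fields are little-endian on disk, hence le32_to_cpu(). */
const struct common_firmware_header *hdr =
	(const struct common_firmware_header *)fw->data;
const __le32 *ucode = (const __le32 *)
	(fw->data + le32_to_cpu(hdr->ucode_array_offset_bytes));
u32 ucode_dwords = le32_to_cpu(hdr->ucode_size_bytes) / 4;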
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index a4ad270e8261..6bf55ec85b62 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -117,7 +117,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
@@ -674,7 +674,7 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
int r, i;
r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (r)
return r;
@@ -720,7 +720,7 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
int r, i;
r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index aa21c31a846c..f9b70a43aa52 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -126,7 +126,7 @@ int radeon_vce_init(struct radeon_device *rdev)
size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index eecff6bbd341..ccae4d9dc3de 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -238,8 +238,8 @@ void radeon_vm_flush(struct radeon_device *rdev,
uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
/* if we can't remember our last VM flush then flush now! */
- /* XXX figure out why we have to flush all the time */
- if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
+ if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
+ trace_radeon_vm_flush(pd_addr, ring, vm->id);
vm->pd_gpu_addr = pd_addr;
radeon_ring_vm_flush(rdev, ring, vm);
}
@@ -325,16 +325,15 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
}
bo_va->vm = vm;
bo_va->bo = bo;
- bo_va->soffset = 0;
- bo_va->eoffset = 0;
+ bo_va->it.start = 0;
+ bo_va->it.last = 0;
bo_va->flags = 0;
- bo_va->valid = false;
+ bo_va->addr = 0;
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
- INIT_LIST_HEAD(&bo_va->vm_list);
+ INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
- list_add(&bo_va->vm_list, &vm->va);
list_add_tail(&bo_va->bo_list, &bo->va);
mutex_unlock(&vm->mutex);
@@ -342,6 +341,42 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
}
/**
+ * radeon_vm_set_pages - helper to call the right asic function
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to set up the page table using the DMA.
+ */
+static void radeon_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+ if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+ uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+ radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
+
+ } else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
+ radeon_asic_vm_write_pages(rdev, ib, pe, addr,
+ count, incr, flags);
+
+ } else {
+ radeon_asic_vm_set_pages(rdev, ib, pe, addr,
+ count, incr, flags);
+ }
+}
+
+/**
* radeon_vm_clear_bo - initially clear the page dir/table
*
* @rdev: radeon_device pointer
@@ -375,14 +410,15 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
addr = radeon_bo_gpu_offset(bo);
entries = radeon_bo_size(bo) / 8;
- r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
- NULL, entries * 2 + 64);
+ r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
if (r)
goto error;
ib.length_dw = 0;
- radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
+ radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
+ radeon_asic_vm_pad_ib(rdev, &ib);
+ WARN_ON(ib.length_dw > 64);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r)
@@ -418,11 +454,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
uint32_t flags)
{
uint64_t size = radeon_bo_size(bo_va->bo);
- uint64_t eoffset, last_offset = 0;
struct radeon_vm *vm = bo_va->vm;
- struct radeon_bo_va *tmp;
- struct list_head *head;
unsigned last_pfn, pt_idx;
+ uint64_t eoffset;
int r;
if (soffset) {
@@ -444,38 +478,49 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
}
mutex_lock(&vm->mutex);
- head = &vm->va;
- last_offset = 0;
- list_for_each_entry(tmp, &vm->va, vm_list) {
- if (bo_va == tmp) {
- /* skip over currently modified bo */
- continue;
+ if (bo_va->it.start || bo_va->it.last) {
+ if (bo_va->addr) {
+ /* add a clone of the bo_va to clear the old address */
+ struct radeon_bo_va *tmp;
+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (!tmp) {
+ mutex_unlock(&vm->mutex);
+ return -ENOMEM;
+ }
+ tmp->it.start = bo_va->it.start;
+ tmp->it.last = bo_va->it.last;
+ tmp->vm = vm;
+ tmp->addr = bo_va->addr;
+ tmp->bo = radeon_bo_ref(bo_va->bo);
+ list_add(&tmp->vm_status, &vm->freed);
}
- if (soffset >= last_offset && eoffset <= tmp->soffset) {
- /* bo can be added before this one */
- break;
- }
- if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
+ interval_tree_remove(&bo_va->it, &vm->va);
+ bo_va->it.start = 0;
+ bo_va->it.last = 0;
+ }
+
+ soffset /= RADEON_GPU_PAGE_SIZE;
+ eoffset /= RADEON_GPU_PAGE_SIZE;
+ if (soffset || eoffset) {
+ struct interval_tree_node *it;
+ it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
+ if (it) {
+ struct radeon_bo_va *tmp;
+ tmp = container_of(it, struct radeon_bo_va, it);
/* bo and tmp overlap, invalid offset */
- dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
- bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
- (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+ dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
+ "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
+ soffset, tmp->bo, tmp->it.start, tmp->it.last);
mutex_unlock(&vm->mutex);
return -EINVAL;
}
- last_offset = tmp->eoffset;
- head = &tmp->vm_list;
+ bo_va->it.start = soffset;
+ bo_va->it.last = eoffset - 1;
+ interval_tree_insert(&bo_va->it, &vm->va);
}
- bo_va->soffset = soffset;
- bo_va->eoffset = eoffset;
bo_va->flags = flags;
- bo_va->valid = false;
- list_move(&bo_va->vm_list, head);
+ bo_va->addr = 0;
- soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
- eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
+ soffset >>= radeon_vm_block_size;
+ eoffset >>= radeon_vm_block_size;
BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
@@ -496,7 +541,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt);
if (r)
return r;
@@ -597,7 +642,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
ndw = 64;
/* assume the worst case */
- ndw += vm->max_pde_used * 16;
+ ndw += vm->max_pde_used * 6;
/* update too big for an IB */
if (ndw > 0xfffff)
@@ -626,9 +671,9 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
((last_pt + incr * count) != pt)) {
if (count) {
- radeon_asic_vm_set_page(rdev, &ib, last_pde,
- last_pt, count, incr,
- R600_PTE_VALID);
+ radeon_vm_set_pages(rdev, &ib, last_pde,
+ last_pt, count, incr,
+ R600_PTE_VALID);
}
count = 1;
@@ -640,12 +685,14 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
}
if (count)
- radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
- incr, R600_PTE_VALID);
+ radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
+ incr, R600_PTE_VALID);
if (ib.length_dw != 0) {
+ radeon_asic_vm_pad_ib(rdev, &ib);
radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
+ WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
@@ -711,30 +758,30 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
(frag_start >= frag_end)) {
count = (pe_end - pe_start) / 8;
- radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
return;
}
/* handle the 4K area at the beginning */
if (pe_start != frag_start) {
count = (frag_start - pe_start) / 8;
- radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
addr += RADEON_GPU_PAGE_SIZE * count;
}
/* handle the area in the middle */
count = (frag_end - frag_start) / 8;
- radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
- RADEON_GPU_PAGE_SIZE, flags | frag_flags);
+ radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags | frag_flags);
/* handle the 4K area at the end */
if (frag_end != pe_end) {
addr += RADEON_GPU_PAGE_SIZE * count;
count = (pe_end - frag_end) / 8;
- radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
}
}
@@ -763,9 +810,6 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
unsigned count = 0;
uint64_t addr;
- start = start / RADEON_GPU_PAGE_SIZE;
- end = end / RADEON_GPU_PAGE_SIZE;
-
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> radeon_vm_block_size;
@@ -823,66 +867,78 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
* Object have to be reserved and mutex must be locked!
*/
int radeon_vm_bo_update(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
+ struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem)
{
+ struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
- struct radeon_bo_va *bo_va;
- unsigned nptes, ndw;
+ unsigned nptes, ncmds, ndw;
uint64_t addr;
+ uint32_t flags;
int r;
- bo_va = radeon_vm_bo_find(vm, bo);
- if (bo_va == NULL) {
- dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
- return -EINVAL;
- }
-
- if (!bo_va->soffset) {
+ if (!bo_va->it.start) {
dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
- bo, vm);
+ bo_va->bo, vm);
return -EINVAL;
}
- if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
- return 0;
+ list_del_init(&bo_va->vm_status);
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+ bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
if (mem) {
addr = mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
bo_va->flags |= RADEON_VM_PAGE_VALID;
- bo_va->valid = true;
}
if (mem->mem_type == TTM_PL_TT) {
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+ if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
+ bo_va->flags |= RADEON_VM_PAGE_SNOOPED;
+
} else {
addr += rdev->vm_manager.vram_base_offset;
}
} else {
addr = 0;
- bo_va->valid = false;
}
+ if (addr == bo_va->addr)
+ return 0;
+ bo_va->addr = addr;
+
trace_radeon_vm_bo_update(bo_va);
- nptes = radeon_bo_ngpu_pages(bo);
+ nptes = bo_va->it.last - bo_va->it.start + 1;
+
+ /* reserve space for one command every (1 << BLOCK_SIZE) entries
+ or 2k dwords (whichever is smaller) */
+ ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;
/* padding, etc. */
ndw = 64;
- if (radeon_vm_block_size > 11)
- /* reserve space for one header for every 2k dwords */
- ndw += (nptes >> 11) * 4;
- else
- /* reserve space for one header for
- every (1 << BLOCK_SIZE) entries */
- ndw += (nptes >> radeon_vm_block_size) * 4;
+ flags = radeon_vm_page_flags(bo_va->flags);
+ if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+ /* only copy commands needed */
+ ndw += ncmds * 7;
+
+ } else if (flags & R600_PTE_SYSTEM) {
+ /* header for write data commands */
+ ndw += ncmds * 4;
- /* reserve space for pte addresses */
- ndw += nptes * 2;
+ /* body of write data command */
+ ndw += nptes * 2;
+
+ } else {
+ /* set page commands needed */
+ ndw += ncmds * 10;
+
+ /* two extra commands for begin/end of fragment */
+ ndw += 2 * 10;
+ }
/* update too big for an IB */
if (ndw > 0xfffff)
@@ -893,8 +949,12 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
return r;
ib.length_dw = 0;
- radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
- addr, radeon_vm_page_flags(bo_va->flags));
+ radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
+ bo_va->it.last + 1, addr,
+ radeon_vm_page_flags(bo_va->flags));
+
+ radeon_asic_vm_pad_ib(rdev, &ib);
+ WARN_ON(ib.length_dw > ndw);
radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
@@ -911,33 +971,87 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
}
/**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+ r = radeon_vm_bo_update(rdev, bo_va, NULL);
+ radeon_bo_unref(&bo_va->bo);
+ kfree(bo_va);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * radeon_vm_clear_invalids - clear invalidated BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all invalidated BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_invalids(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
+ r = radeon_vm_bo_update(rdev, bo_va, NULL);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+/**
* radeon_vm_bo_rmv - remove a bo from a specific vm
*
* @rdev: radeon_device pointer
* @bo_va: requested bo_va
*
* Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
*
* Object have to be reserved!
*/
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va)
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va)
{
- int r = 0;
-
- mutex_lock(&bo_va->vm->mutex);
- if (bo_va->soffset)
- r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
+ struct radeon_vm *vm = bo_va->vm;
- list_del(&bo_va->vm_list);
- mutex_unlock(&bo_va->vm->mutex);
list_del(&bo_va->bo_list);
- kfree(bo_va);
- return r;
+ mutex_lock(&vm->mutex);
+ interval_tree_remove(&bo_va->it, &vm->va);
+ list_del(&bo_va->vm_status);
+
+ if (bo_va->addr) {
+ bo_va->bo = radeon_bo_ref(bo_va->bo);
+ list_add(&bo_va->vm_status, &vm->freed);
+ } else {
+ kfree(bo_va);
+ }
+
+ mutex_unlock(&vm->mutex);
}
/**
@@ -955,7 +1069,12 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo_va *bo_va;
list_for_each_entry(bo_va, &bo->va, bo_list) {
- bo_va->valid = false;
+ if (bo_va->addr) {
+ mutex_lock(&bo_va->vm->mutex);
+ list_del(&bo_va->vm_status);
+ list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
+ mutex_unlock(&bo_va->vm->mutex);
+ }
}
}
@@ -975,11 +1094,14 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
int r;
vm->id = 0;
+ vm->ib_bo_va = NULL;
vm->fence = NULL;
vm->last_flush = NULL;
vm->last_id_use = NULL;
mutex_init(&vm->mutex);
- INIT_LIST_HEAD(&vm->va);
+ vm->va = RB_ROOT;
+ INIT_LIST_HEAD(&vm->invalidated);
+ INIT_LIST_HEAD(&vm->freed);
pd_size = radeon_vm_directory_size(rdev);
pd_entries = radeon_vm_num_pdes(rdev);
@@ -993,7 +1115,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
}
r = radeon_bo_create(rdev, pd_size, align, true,
- RADEON_GEM_DOMAIN_VRAM, NULL,
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
&vm->page_directory);
if (r)
return r;
@@ -1022,11 +1144,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp;
int i, r;
- if (!list_empty(&vm->va)) {
+ if (!RB_EMPTY_ROOT(&vm->va)) {
dev_err(rdev->dev, "still active bo inside vm\n");
}
- list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
- list_del_init(&bo_va->vm_list);
+ rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
+ interval_tree_remove(&bo_va->it, &vm->va);
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
list_del_init(&bo_va->bo_list);
@@ -1034,7 +1156,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
kfree(bo_va);
}
}
-
+ list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+ radeon_bo_unref(&bo_va->bo);
+ kfree(bo_va);
+ }
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_bo_unref(&vm->page_tables[i].bo);
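The move from the sorted vm_list to an interval tree turns the conflict check in radeon_vm_bo_set_addr() into a single O(log n) range query instead of a list walk. Reduced to its core (offsets are in GPU pages, it.last is inclusive):

/* Reject a new [start, last] mapping if anything in vm->va overlaps it. */
struct interval_tree_node *it;

it = interval_tree_iter_first(&vm->va, start, last);
if (it)
	return -EINVAL;	/* container_of(it, struct radeon_bo_va, it) is the conflict */

bo_va->it.start = start;
bo_va->it.last = last;
interval_tree_insert(&bo_va->it, &vm->va);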
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index a0f96decece3..6c1fc339d228 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -109,7 +109,6 @@ int rs400_gart_enable(struct radeon_device *rdev)
uint32_t size_reg;
uint32_t tmp;
- radeon_gart_restore(rdev);
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
@@ -209,17 +208,24 @@ void rs400_gart_fini(struct radeon_device *rdev)
radeon_gart_table_ram_free(rdev);
}
+#define RS400_PTE_UNSNOOPED (1 << 0)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr, uint32_t flags)
{
uint32_t entry;
u32 *gtt = rdev->gart.ptr;
entry = (lower_32_bits(addr) & PAGE_MASK) |
- ((upper_32_bits(addr) & 0xff) << 4) |
- RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
+ ((upper_32_bits(addr) & 0xff) << 4);
+ if (flags & RADEON_GART_PAGE_READ)
+ entry |= RS400_PTE_READABLE;
+ if (flags & RADEON_GART_PAGE_WRITE)
+ entry |= RS400_PTE_WRITEABLE;
+ if (!(flags & RADEON_GART_PAGE_SNOOP))
+ entry |= RS400_PTE_UNSNOOPED;
entry = cpu_to_le32(entry);
gtt[i] = entry;
}
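The permission bits land in the PTE entry alongside the high address nibble. For a snooped read/write mapping of the 40-bit address 0x0000000123456000, the encoding works out as follows (illustrative arithmetic):

/* addr = 0x0000000123456000, flags = VALID | READ | WRITE | SNOOP */
entry = (0x23456000 & PAGE_MASK)	/* low 32 bits, page-aligned  */
      | (0x01 << 4)			/* address bits 39:32 -> 11:4 */
      | RS400_PTE_READABLE		/* 1 << 3                     */
      | RS400_PTE_WRITEABLE;		/* 1 << 2                     */
/* = 0x2345601c; RS400_PTE_UNSNOOPED (bit 0) stays clear for snooped pages. */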
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d1a35cb1c91d..5f6db4629aaa 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -555,7 +555,6 @@ static int rs600_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Enable bus master */
tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
@@ -626,15 +625,21 @@ static void rs600_gart_fini(struct radeon_device *rdev)
radeon_gart_table_vram_free(rdev);
}
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr, uint32_t flags)
{
void __iomem *ptr = (void *)rdev->gart.ptr;
addr = addr & 0xFFFFFFFFFFFFF000ULL;
- if (addr == rdev->dummy_page.addr)
- addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
- else
- addr |= R600_PTE_GART;
+ addr |= R600_PTE_SYSTEM;
+ if (flags & RADEON_GART_PAGE_VALID)
+ addr |= R600_PTE_VALID;
+ if (flags & RADEON_GART_PAGE_READ)
+ addr |= R600_PTE_READABLE;
+ if (flags & RADEON_GART_PAGE_WRITE)
+ addr |= R600_PTE_WRITEABLE;
+ if (flags & RADEON_GART_PAGE_SNOOP)
+ addr |= R600_PTE_SNOOPED;
writeq(addr, ptr + (i * 8));
}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 237dd29d9f1c..3e21e869015f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x3) != 0) {
- tmp &= ~0x3;
+ if ((tmp & 0x7) != 3) {
+ tmp &= ~0x7;
+ tmp |= 0x3;
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index da8703d8d455..2983f17ea1b3 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -900,7 +900,6 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index da041a43d82e..3c76e1dcdf04 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2329,12 +2329,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_MEMORY_SS, 0);
- /* disable ss, causes hangs on some cayman boards */
- if (rdev->family == CHIP_CAYMAN) {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- }
-
if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 730cee2c34cf..011779bd2b3d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -42,6 +42,14 @@ MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
+
+MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+MODULE_FIRMWARE("radeon/tahiti_me.bin");
+MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
@@ -49,6 +57,14 @@ MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
+
+MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
@@ -56,6 +72,14 @@ MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_smc.bin");
+
+MODULE_FIRMWARE("radeon/verde_pfp.bin");
+MODULE_FIRMWARE("radeon/verde_me.bin");
+MODULE_FIRMWARE("radeon/verde_ce.bin");
+MODULE_FIRMWARE("radeon/verde_mc.bin");
+MODULE_FIRMWARE("radeon/verde_rlc.bin");
+MODULE_FIRMWARE("radeon/verde_smc.bin");
+
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
@@ -63,6 +87,14 @@ MODULE_FIRMWARE("radeon/OLAND_mc.bin");
MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
MODULE_FIRMWARE("radeon/OLAND_smc.bin");
+
+MODULE_FIRMWARE("radeon/oland_pfp.bin");
+MODULE_FIRMWARE("radeon/oland_me.bin");
+MODULE_FIRMWARE("radeon/oland_ce.bin");
+MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/oland_rlc.bin");
+MODULE_FIRMWARE("radeon/oland_smc.bin");
+
MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
@@ -71,6 +103,13 @@ MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+MODULE_FIRMWARE("radeon/hainan_me.bin");
+MODULE_FIRMWARE("radeon/hainan_ce.bin");
+MODULE_FIRMWARE("radeon/hainan_mc.bin");
+MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+MODULE_FIRMWARE("radeon/hainan_smc.bin");
+
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
@@ -1470,38 +1509,54 @@ static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
/* ucode loading */
int si_mc_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
+ const __be32 *fw_data = NULL;
+ const __le32 *new_fw_data = NULL;
u32 running, blackout = 0;
- u32 *io_mc_regs;
+ u32 *io_mc_regs = NULL;
+ const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
if (!rdev->mc_fw)
return -EINVAL;
- ucode_size = rdev->mc_fw->size / 4;
+ if (rdev->new_fw) {
+ const struct mc_firmware_header_v1_0 *hdr =
+ (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
+
+ radeon_ucode_print_mc_hdr(&hdr->header);
+ regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+ new_io_mc_regs = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ new_fw_data = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ ucode_size = rdev->mc_fw->size / 4;
- switch (rdev->family) {
- case CHIP_TAHITI:
- io_mc_regs = (u32 *)&tahiti_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_PITCAIRN:
- io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_VERDE:
- default:
- io_mc_regs = (u32 *)&verde_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_OLAND:
- io_mc_regs = (u32 *)&oland_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_HAINAN:
- io_mc_regs = (u32 *)&hainan_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ io_mc_regs = (u32 *)&tahiti_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_PITCAIRN:
+ io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_VERDE:
+ default:
+ io_mc_regs = (u32 *)&verde_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_OLAND:
+ io_mc_regs = (u32 *)&oland_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_HAINAN:
+ io_mc_regs = (u32 *)&hainan_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ }
+ fw_data = (const __be32 *)rdev->mc_fw->data;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1518,13 +1573,21 @@ int si_mc_load_microcode(struct radeon_device *rdev)
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
- WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
- WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ if (rdev->new_fw) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+ WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+ } else {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ }
}
/* load the MC ucode */
- fw_data = (const __be32 *)rdev->mc_fw->data;
- for (i = 0; i < ucode_size; i++)
- WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ for (i = 0; i < ucode_size; i++) {
+ if (rdev->new_fw)
+ WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+ else
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ }
/* put the engine back into the active state */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
@@ -1553,18 +1616,19 @@ int si_mc_load_microcode(struct radeon_device *rdev)
static int si_init_microcode(struct radeon_device *rdev)
{
const char *chip_name;
- const char *rlc_chip_name;
+ const char *new_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
size_t smc_req_size, mc2_req_size;
char fw_name[30];
int err;
+ int new_fw = 0;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
- rlc_chip_name = "TAHITI";
+ new_chip_name = "tahiti";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1575,7 +1639,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
- rlc_chip_name = "PITCAIRN";
+ new_chip_name = "pitcairn";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1586,7 +1650,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_VERDE:
chip_name = "VERDE";
- rlc_chip_name = "VERDE";
+ new_chip_name = "verde";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1597,7 +1661,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_OLAND:
chip_name = "OLAND";
- rlc_chip_name = "OLAND";
+ new_chip_name = "oland";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1607,7 +1671,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
- rlc_chip_name = "HAINAN";
+ new_chip_name = "hainan";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1618,86 +1682,178 @@ static int si_init_microcode(struct radeon_device *rdev)
default: BUG();
}
- DRM_INFO("Loading %s Microcode\n", chip_name);
+ DRM_INFO("Loading %s Microcode\n", new_chip_name);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->pfp_fw->size != pfp_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->pfp_fw->size, fw_name);
- err = -EINVAL;
- goto out;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->pfp_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->me_fw->size != me_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->me_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->me_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->ce_fw->size != ce_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->ce_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->ce_fw->size != ce_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->ce_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->ce_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->rlc_fw->size != rlc_req_size) {
- printk(KERN_ERR
- "si_rlc: Bogus length %zu in firmware \"%s\"\n",
- rdev->rlc_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "si_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->rlc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
- if (err)
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ }
+ if ((rdev->mc_fw->size != mc_req_size) &&
+ (rdev->mc_fw->size != mc2_req_size)) {
+ printk(KERN_ERR
+ "si_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
+ } else {
+ err = radeon_ucode_validate(rdev->mc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
goto out;
+ } else {
+ new_fw++;
+ }
}
- if ((rdev->mc_fw->size != mc_req_size) &&
- (rdev->mc_fw->size != mc2_req_size)) {
- printk(KERN_ERR
- "si_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
- err = -EINVAL;
- }
- DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
- printk(KERN_ERR
- "smc: error loading firmware \"%s\"\n",
- fw_name);
- release_firmware(rdev->smc_fw);
- rdev->smc_fw = NULL;
- err = 0;
- } else if (rdev->smc_fw->size != smc_req_size) {
- printk(KERN_ERR
- "si_smc: Bogus length %zu in firmware \"%s\"\n",
- rdev->smc_fw->size, fw_name);
- err = -EINVAL;
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ err = 0;
+ } else if (rdev->smc_fw->size != smc_req_size) {
+ printk(KERN_ERR
+ "si_smc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->smc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->smc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
+ if (new_fw == 0) {
+ rdev->new_fw = false;
+ } else if (new_fw < 6) {
+ printk(KERN_ERR "si_fw: mixing new and old firmware!\n");
+ err = -EINVAL;
+ } else {
+ rdev->new_fw = true;
+ }
out:
if (err) {
if (err != -EINVAL)
@@ -3282,34 +3438,77 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
static int si_cp_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
- if (!rdev->me_fw || !rdev->pfp_fw)
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
return -EINVAL;
si_cp_enable(rdev, false);
- /* PFP */
- fw_data = (const __be32 *)rdev->pfp_fw->data;
- WREG32(CP_PFP_UCODE_ADDR, 0);
- for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
- WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_PFP_UCODE_ADDR, 0);
-
- /* CE */
- fw_data = (const __be32 *)rdev->ce_fw->data;
- WREG32(CP_CE_UCODE_ADDR, 0);
- for (i = 0; i < SI_CE_UCODE_SIZE; i++)
- WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_CE_UCODE_ADDR, 0);
-
- /* ME */
- fw_data = (const __be32 *)rdev->me_fw->data;
- WREG32(CP_ME_RAM_WADDR, 0);
- for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
- WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_ME_RAM_WADDR, 0);
+ if (rdev->new_fw) {
+ const struct gfx_firmware_header_v1_0 *pfp_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+ const struct gfx_firmware_header_v1_0 *ce_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+ const struct gfx_firmware_header_v1_0 *me_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
+ radeon_ucode_print_gfx_hdr(&ce_hdr->header);
+ radeon_ucode_print_gfx_hdr(&me_hdr->header);
+
+ /* PFP */
+ fw_data = (const __le32 *)
+ (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __le32 *)
+ (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __le32 *)
+ (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ } else {
+ const __be32 *fw_data;
+
+ /* PFP */
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < SI_CE_UCODE_SIZE; i++)
+ WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ }
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_CE_UCODE_ADDR, 0);
@@ -4048,7 +4247,6 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
@@ -4815,7 +5013,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* write new base address */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
if (vm->id < 8) {
@@ -5592,7 +5790,6 @@ static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
static int si_rlc_resume(struct radeon_device *rdev)
{
u32 i;
- const __be32 *fw_data;
if (!rdev->rlc_fw)
return -EINVAL;
@@ -5615,10 +5812,26 @@ static int si_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
- fw_data = (const __be32 *)rdev->rlc_fw->data;
- for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ if (rdev->new_fw) {
+ const struct rlc_firmware_header_v1_0 *hdr =
+ (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
+ u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ const __le32 *fw_data = (const __le32 *)
+ (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ radeon_ucode_print_rlc_hdr(&hdr->header);
+
+ for (i = 0; i < fw_size; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
+ }
+ } else {
+ const __be32 *fw_data =
+ (const __be32 *)rdev->rlc_fw->data;
+ for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
}
WREG32(RLC_UCODE_ADDR, 0);
@@ -6103,6 +6316,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -6317,7 +6531,8 @@ restart_ih:
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
@@ -6376,14 +6591,16 @@ restart_ih:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ if (addr == 0x0 && status == 0x0)
+ break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
si_vm_decode_fault(rdev, status, addr);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 176: /* RINGID0 CP_INT */
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
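Every ucode type in si_init_microcode() now follows the same shape: request the lower-case headered name first, validate it and bump new_fw on success, otherwise fall back to the legacy upper-case blob with the old raw size checks. Condensed into a hypothetical helper (not part of the patch; the legacy size checks are elided):

static int try_new_then_legacy(struct radeon_device *rdev,
			       const struct firmware **fw,
			       const char *new_name, const char *legacy_name,
			       int *new_fw)
{
	int err = request_firmware(fw, new_name, rdev->dev);

	if (!err) {
		err = radeon_ucode_validate(*fw);
		if (!err)
			(*new_fw)++;	/* headered blob accepted */
		return err;
	}
	/* fall back to the legacy BE blob; caller checks its raw size */
	return request_firmware(fw, legacy_name, rdev->dev);
}

The final new_fw count makes the policy explicit: 0 means all-legacy, 6 means all-new, and anything in between is a mixed set that is rejected with -EINVAL.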
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index e24c94b6d14d..716505129450 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -56,7 +56,41 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}
/**
- * si_dma_vm_set_page - update the page tables using the DMA
+ * si_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (SI).
+ */
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ while (count) {
+ unsigned bytes = count * 8;
+ if (bytes > 0xFFFF8)
+ bytes = 0xFFFF8;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 1, 0, 0, bytes);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+ pe += bytes;
+ src += bytes;
+ count -= bytes / 8;
+ }
+}
+
+/**
+ * si_dma_vm_write_pages - update PTEs by writing them manually
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
@@ -66,83 +100,89 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using the DMA (SI).
+ * Update PTEs by writing them manually using the DMA (SI).
*/
-void si_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;
- trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
- if (flags == R600_PTE_GART) {
- uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0xFFFF8)
- bytes = 0xFFFF8;
-
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
- 1, 0, 0, bytes);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
- } else if (flags & R600_PTE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & R600_PTE_VALID)
+ } else if (flags & R600_PTE_VALID) {
value = addr;
- else
+ } else {
value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
+ }
+ addr += incr;
+ value |= flags;
+ ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
}
}
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
+
+/**
+ * si_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA (SI).
+ */
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & R600_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
}
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
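The 0xFFFF8 and 0xFFFFE clamps in the helpers above appear to come from a 20-bit packet count field (0xFFFFF maximum), rounded down to keep copies 8-byte aligned. Worked out, assuming 8 bytes per PTE and two dwords per written PTE:

/* Per-packet capacity on SI's DMA ring:
 *   copy : 0xFFFF8 bytes  / 8 bytes per PTE  = 131,071 PTEs
 *   write: 0xFFFFE dwords / 2 dwords per PTE = 524,287 PTEs
 * e.g. a 512 MiB mapping (131,072 4K pages) needs two copy packets
 * but fits in a single write packet.
 */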
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 58918868f894..70e61ffeace2 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3812,6 +3812,27 @@ void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
voltage_table->count = max_voltage_steps;
}
+static int si_get_svi2_voltage_table(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
+ struct atom_voltage_table *voltage_table)
+{
+ u32 i;
+
+ if (voltage_dependency_table == NULL)
+ return -EINVAL;
+
+ voltage_table->mask_low = 0;
+ voltage_table->phase_delay = 0;
+
+ voltage_table->count = voltage_dependency_table->count;
+ for (i = 0; i < voltage_table->count; i++) {
+ voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+ voltage_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
static int si_construct_voltage_tables(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
@@ -3819,15 +3840,25 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
struct si_power_info *si_pi = si_get_pi(rdev);
int ret;
- ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
- VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
- if (ret)
- return ret;
+ if (pi->voltage_control) {
+ ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
+ if (ret)
+ return ret;
- if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(rdev,
- SISLANDS_MAX_NO_VREG_STEPS,
- &eg_pi->vddc_voltage_table);
+ if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+ si_trim_voltage_table_to_fit_state_table(rdev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddc_voltage_table);
+ } else if (si_pi->voltage_control_svi2) {
+ ret = si_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &eg_pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
if (eg_pi->vddci_control) {
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
@@ -3840,6 +3871,13 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
SISLANDS_MAX_NO_VREG_STEPS,
&eg_pi->vddci_voltage_table);
}
+ if (si_pi->vddci_control_svi2) {
+ ret = si_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &eg_pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ }
if (pi->mvdd_control) {
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
@@ -3893,46 +3931,55 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
struct si_power_info *si_pi = si_get_pi(rdev);
u8 i;
- if (eg_pi->vddc_voltage_table.count) {
- si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
-
- for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
- if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
- table->maxVDDCIndexInPPTable = i;
- break;
+ if (si_pi->voltage_control_svi2) {
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
+ si_pi->svc_gpio_id);
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
+ si_pi->svd_gpio_id);
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
+ 2);
+ } else {
+ if (eg_pi->vddc_voltage_table.count) {
+ si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+ cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+
+ for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+ if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
+ table->maxVDDCIndexInPPTable = i;
+ break;
+ }
}
}
- }
- if (eg_pi->vddci_voltage_table.count) {
- si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
+ if (eg_pi->vddci_voltage_table.count) {
+ si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
- }
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+ }
- if (si_pi->mvdd_voltage_table.count) {
- si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
+ if (si_pi->mvdd_voltage_table.count) {
+ si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
- cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
- }
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
+ cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
+ }
- if (si_pi->vddc_phase_shed_control) {
- if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
- &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
- si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
+ if (si_pi->vddc_phase_shed_control) {
+ if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
+ &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
+ si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
- table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
- cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
+ table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+ cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
- si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
- (u32)si_pi->vddc_phase_shed_table.phase_delay);
- } else {
- si_pi->vddc_phase_shed_control = false;
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
+ (u32)si_pi->vddc_phase_shed_table.phase_delay);
+ } else {
+ si_pi->vddc_phase_shed_control = false;
+ }
}
}
@@ -5798,16 +5845,17 @@ int si_dpm_enable(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct si_power_info *si_pi = si_get_pi(rdev);
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
int ret;
if (si_is_smc_running(rdev))
return -EINVAL;
- if (pi->voltage_control)
+ if (pi->voltage_control || si_pi->voltage_control_svi2)
si_enable_voltage_control(rdev, true);
if (pi->mvdd_control)
si_get_mvdd_configuration(rdev);
- if (pi->voltage_control) {
+ if (pi->voltage_control || si_pi->voltage_control_svi2) {
ret = si_construct_voltage_tables(rdev);
if (ret) {
DRM_ERROR("si_construct_voltage_tables failed\n");
@@ -6406,16 +6454,32 @@ int si_dpm_init(struct radeon_device *rdev)
ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
pi->voltage_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_GPIO_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT);
+ if (!pi->voltage_control) {
+ si_pi->voltage_control_svi2 =
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_SVID2);
+ if (si_pi->voltage_control_svi2)
+ radeon_atom_get_svi2_info(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
+ }
pi->mvdd_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, VOLTAGE_OBJ_GPIO_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+ VOLTAGE_OBJ_GPIO_LUT);
eg_pi->vddci_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, VOLTAGE_OBJ_GPIO_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+ VOLTAGE_OBJ_GPIO_LUT);
+ if (!eg_pi->vddci_control)
+ si_pi->vddci_control_svi2 =
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+ VOLTAGE_OBJ_SVID2);
si_pi->vddc_phase_shed_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_PHASE_LUT);
rv770_get_engine_memory_ss(rdev);
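si_construct_voltage_tables() now falls back from GPIO-LUT voltage control to SVI2, where the voltage table is synthesized from the clock/voltage dependency table instead of being read from ATOM. A minimal userspace model of that synthesis, using simplified stand-in structs rather than the radeon types:

#include <errno.h>
#include <stdio.h>

#define MAX_ENTRIES 32   /* stand-in for SISLANDS_MAX_NO_VREG_STEPS */

struct dep_entry  { unsigned short v; };
struct dep_table  { unsigned int count; struct dep_entry entries[MAX_ENTRIES]; };
struct volt_entry { unsigned short value; unsigned int smio_low; };
struct volt_table { unsigned int count, mask_low, phase_delay;
                    struct volt_entry entries[MAX_ENTRIES]; };

/* Mirrors si_get_svi2_voltage_table(): SVI2 has no GPIO mask or phase
 * delay, so those fields are zeroed and only the voltage values carry over. */
static int get_svi2_voltage_table(const struct dep_table *dep,
                                  struct volt_table *vt)
{
    unsigned int i;

    if (!dep)
        return -EINVAL;

    vt->mask_low = 0;
    vt->phase_delay = 0;
    vt->count = dep->count;
    for (i = 0; i < vt->count; i++) {
        vt->entries[i].value = dep->entries[i].v;
        vt->entries[i].smio_low = 0;
    }
    return 0;
}

int main(void)
{
    struct dep_table dep = { .count = 3, .entries = { {900}, {1000}, {1100} } };
    struct volt_table vt;

    if (!get_svi2_voltage_table(&dep, &vt))
        printf("%u entries, first value %u\n",
               vt.count, (unsigned int)vt.entries[0].value);
    return 0;
}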
diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h
index 4ce5032cdf49..8b5c06a0832d 100644
--- a/drivers/gpu/drm/radeon/si_dpm.h
+++ b/drivers/gpu/drm/radeon/si_dpm.h
@@ -170,6 +170,8 @@ struct si_power_info {
bool vddc_phase_shed_control;
bool pspp_notify_required;
bool sclk_deep_sleep_above_low;
+ bool voltage_control_svi2;
+ bool vddci_control_svi2;
/* smc offsets */
u32 sram_end;
u32 state_table_start;
@@ -192,6 +194,9 @@ struct si_power_info {
SMC_SIslands_MCRegisters smc_mc_reg_table;
SISLANDS_SMC_STATETABLE smc_statetable;
PP_SIslands_PAPMParameters papm_parm;
+ /* SVI2 */
+ u8 svd_gpio_id;
+ u8 svc_gpio_id;
};
#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index e80efcf0c230..73dbc79c959d 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -219,36 +219,48 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
if (!rdev->smc_fw)
return -EINVAL;
- switch (rdev->family) {
- case CHIP_TAHITI:
- ucode_start_address = TAHITI_SMC_UCODE_START;
- ucode_size = TAHITI_SMC_UCODE_SIZE;
- break;
- case CHIP_PITCAIRN:
- ucode_start_address = PITCAIRN_SMC_UCODE_START;
- ucode_size = PITCAIRN_SMC_UCODE_SIZE;
- break;
- case CHIP_VERDE:
- ucode_start_address = VERDE_SMC_UCODE_START;
- ucode_size = VERDE_SMC_UCODE_SIZE;
- break;
- case CHIP_OLAND:
- ucode_start_address = OLAND_SMC_UCODE_START;
- ucode_size = OLAND_SMC_UCODE_SIZE;
- break;
- case CHIP_HAINAN:
- ucode_start_address = HAINAN_SMC_UCODE_START;
- ucode_size = HAINAN_SMC_UCODE_SIZE;
- break;
- default:
- DRM_ERROR("unknown asic in smc ucode loader\n");
- BUG();
+ if (rdev->new_fw) {
+ const struct smc_firmware_header_v1_0 *hdr =
+ (const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
+
+ radeon_ucode_print_smc_hdr(&hdr->header);
+
+ ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ src = (const u8 *)
+ (rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ ucode_start_address = TAHITI_SMC_UCODE_START;
+ ucode_size = TAHITI_SMC_UCODE_SIZE;
+ break;
+ case CHIP_PITCAIRN:
+ ucode_start_address = PITCAIRN_SMC_UCODE_START;
+ ucode_size = PITCAIRN_SMC_UCODE_SIZE;
+ break;
+ case CHIP_VERDE:
+ ucode_start_address = VERDE_SMC_UCODE_START;
+ ucode_size = VERDE_SMC_UCODE_SIZE;
+ break;
+ case CHIP_OLAND:
+ ucode_start_address = OLAND_SMC_UCODE_START;
+ ucode_size = OLAND_SMC_UCODE_SIZE;
+ break;
+ case CHIP_HAINAN:
+ ucode_start_address = HAINAN_SMC_UCODE_START;
+ ucode_size = HAINAN_SMC_UCODE_SIZE;
+ break;
+ default:
+ DRM_ERROR("unknown asic in smc ucode loader\n");
+ BUG();
+ }
+ src = (const u8 *)rdev->smc_fw->data;
}
if (ucode_size & 3)
return -EINVAL;
- src = (const u8 *)rdev->smc_fw->data;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
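With rdev->new_fw set, the loader reads the start address, size, and payload offset from a little-endian firmware header instead of per-ASIC constants. A standalone sketch of that parsing, using a simplified three-field header rather than the real smc_firmware_header_v1_0 layout:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Fields are little-endian in the blob and must be converted,
 * as le32_to_cpu() does above. */
static uint32_t le32_to_host(const uint8_t *p)
{
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
    /* A fake blob: 12-byte header, then 8 bytes of ucode payload. */
    uint8_t blob[20] = { 8, 0, 0, 0,  12, 0, 0, 0,  0, 0, 1, 0 };

    uint32_t ucode_size   = le32_to_host(blob);      /* ucode_size_bytes */
    uint32_t ucode_offset = le32_to_host(blob + 4);  /* ucode_array_offset_bytes */
    uint32_t ucode_start  = le32_to_host(blob + 8);  /* ucode_start_addr */
    const uint8_t *src = blob + ucode_offset;

    if (ucode_size & 3)          /* driver rejects non-dword-multiple sizes */
        return 1;

    printf("load %" PRIu32 " bytes at 0x%" PRIx32 " from offset %" PRIu32 "\n",
           ucode_size, ucode_start, ucode_offset);
    (void)src;
    return 0;
}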
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 10e945a49479..623a0b1e2d9d 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -241,6 +241,9 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4
#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC
#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100
+#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 20da6ff183df..32e50be9c4ac 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
- /* There are stability issues reported on latops with
- * bapm installed when switching between AC and battery
- * power. At the same time, some desktop boards hang
- * if it's not enabled and dpm is enabled.
+ /* There are stability issues reported with
+ * bapm enabled when switching between AC and battery
+ * power. At the same time, some MSI boards hang
+ * if it's not enabled and dpm is enabled. Just enable
+ * it for MSI boards right now.
*/
- if (rdev->flags & RADEON_IS_MOBILITY)
- pi->enable_bapm = false;
- else
+ if (rdev->pdev->subsystem_vendor == 0x1462)
pi->enable_bapm = true;
+ else
+ pi->enable_bapm = false;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
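The new policy keys bapm off the PCI subsystem vendor rather than the mobility flag. A tiny sketch of that check; SUBSYSTEM_VENDOR_MSI is a made-up name for 0x1462, not a kernel constant:

#include <stdbool.h>
#include <stdio.h>

#define SUBSYSTEM_VENDOR_MSI 0x1462   /* hypothetical name for MSI's ID */

/* Mirrors the policy above: trust bapm only on MSI boards for now. */
static bool want_bapm(unsigned short subsystem_vendor)
{
    return subsystem_vendor == SUBSYSTEM_VENDOR_MSI;
}

int main(void)
{
    printf("MSI: %d, other: %d\n", want_bapm(0x1462), want_bapm(0x1043));
    return 0;
}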
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 792fd1d20e86..fda64b7b73e8 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -187,7 +187,7 @@ static struct drm_driver rcar_du_driver = {
* Power management
*/
-#if CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_SLEEP
static int rcar_du_pm_suspend(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index a87edfac111f..76026104d000 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -135,7 +135,9 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
{
struct rcar_du_device *rcdu = dev->dev_private;
const struct rcar_du_format_info *format;
+ unsigned int max_pitch;
unsigned int align;
+ unsigned int bpp;
format = rcar_du_format_info(mode_cmd->pixel_format);
if (format == NULL) {
@@ -144,13 +146,20 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
+ /*
+ * The pitch and alignment constraints are expressed in pixels on the
+ * hardware side and in bytes in the DRM API.
+ */
+ bpp = format->planes == 2 ? 1 : format->bpp / 8;
+ max_pitch = 4096 * bpp;
+
if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
align = 128;
else
- align = 16 * format->bpp / 8;
+ align = 16 * bpp;
if (mode_cmd->pitches[0] & (align - 1) ||
- mode_cmd->pitches[0] >= 8192) {
+ mode_cmd->pitches[0] >= max_pitch) {
dev_dbg(dev->dev, "invalid pitch value %u\n",
mode_cmd->pitches[0]);
return ERR_PTR(-EINVAL);
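The fb_create() change derives both the pitch alignment and the maximum pitch from a per-format bytes-per-pixel value (1 for the luma plane of two-plane formats). A userspace model of the resulting check; pitch_ok() is illustrative, not a driver function:

#include <stdbool.h>
#include <stdio.h>

static bool pitch_ok(unsigned int pitch, unsigned int format_bpp,
                     unsigned int planes, bool align_128b_quirk)
{
    unsigned int bpp = (planes == 2) ? 1 : format_bpp / 8;
    unsigned int max_pitch = 4096 * bpp;   /* 4096-pixel hardware limit */
    unsigned int align = align_128b_quirk ? 128 : 16 * bpp;

    return !(pitch & (align - 1)) && pitch < max_pitch;
}

int main(void)
{
    /* XRGB8888 (32 bpp, single plane): max pitch is 16384 bytes. */
    printf("%d %d\n", pitch_ok(7680, 32, 1, false),    /* 1920*4: ok */
                      pitch_ok(16384, 32, 1, false));  /* at limit: rejected */
    return 0;
}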
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 82c84c7fd4f6..ff4ba483b602 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -297,7 +297,7 @@ static struct drm_driver shmob_drm_driver = {
* Power management
*/
-#if CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_SLEEP
static int shmob_drm_pm_suspend(struct device *dev)
{
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
new file mode 100644
index 000000000000..2d9d4252d598
--- /dev/null
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -0,0 +1,14 @@
+config DRM_STI
+ tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
+ depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
+ select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_CMA_HELPER
+ help
+ Choose this option to enable DRM on the STM stiH41x chipset
+
+config DRM_STI_FBDEV
+ bool "DRM frame buffer device for STMicroelectronics SoC stiH41x Serie"
+ depends on DRM_STI
+ help
+ Choose this option to enable FBDEV on top of DRM for the STM stiH41x chipset
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
new file mode 100644
index 000000000000..04ac2ceef27f
--- /dev/null
+++ b/drivers/gpu/drm/sti/Makefile
@@ -0,0 +1,21 @@
+sticompositor-y := \
+ sti_layer.o \
+ sti_mixer.o \
+ sti_gdp.o \
+ sti_vid.o \
+ sti_compositor.o \
+ sti_drm_crtc.o \
+ sti_drm_plane.o
+
+stihdmi-y := sti_hdmi.o \
+ sti_hdmi_tx3g0c55phy.o \
+ sti_hdmi_tx3g4c28phy.o
+
+obj-$(CONFIG_DRM_STI) = \
+ sti_vtg.o \
+ sti_vtac.o \
+ stihdmi.o \
+ sti_hda.o \
+ sti_tvout.o \
+ sticompositor.o \
+ sti_drm_drv.o
\ No newline at end of file
diff --git a/drivers/gpu/drm/sti/NOTES b/drivers/gpu/drm/sti/NOTES
new file mode 100644
index 000000000000..57e257969198
--- /dev/null
+++ b/drivers/gpu/drm/sti/NOTES
@@ -0,0 +1,58 @@
+1. stiH display hardware IP
+---------------------------
+The STMicroelectronics stiH SoCs use a common chain of HW display IP blocks:
+- The High Quality Video Display Processor (HQVDP) gets video frames from a
+ video decoder and does high quality video processing, including scaling.
+
+- The Compositor is a multiplane, dual-mixer (Main & Aux) digital processor. It
+ has several inputs:
+ - The graphics planes are internally processed by the Generic Display
+ Pipeline (GDP).
+ - The video plug (VID) connects to the HQVDP output.
+ - The cursor handles ... a cursor.
+- The TV OUT pre-formats (convert, clip, round) the compositor output data.
+- The HDMI / DVO / HD Analog / SD analog IPs build the video signals:
+ - DVO (Digital Video Output) handles a 24-bit parallel signal.
+ - The HD analog signal is typically driven over a YCbCr cable, supporting up
+ to 1080i mode.
+ - The SD analog signal is typically used for legacy TV.
+- The VTG (Video Timing Generators) build the Vsync signals used by the other
+ HW IPs.
+Note that some stiH drivers support only a subset of these HW IPs.
+
+ .-------------. .-----------. .-----------.
+GPU >-------------+GDP Main | | +---+ HDMI +--> HDMI
+GPU >-------------+GDP mixer+---+ | :===========:
+GPU >-------------+Cursor | | +---+ DVO +--> 24b//
+ ------- | COMPOSITOR | | TV OUT | :===========:
+ | | | | | +---+ HD analog +--> YCbCr
+Vid >--+ HQVDP +--+VID Aux +---+ | :===========:
+dec | | | mixer| | +---+ SD analog +--> CVBS
+ '-------' '-------------' '-----------' '-----------'
+ .-----------.
+ | main+--> Vsync
+ | VTG |
+ | aux+--> Vsync
+ '-----------'
+
+2. DRM / HW mapping
+-------------------
+These IPs are mapped to DRM objects as follows:
+- The CRTCs are mapped to the Compositor Main and Aux Mixers
+- The Framebuffers and planes are mapped to the Compositor GDP (non-video
+ buffers) and to HQVDP+VID (video buffers)
+- The Cursor is mapped to the Compositor Cursor
+- The Encoders are mapped to the TVOut
+- The Bridges/Connectors are mapped to the HDMI / DVO / HD Analog / SD analog
+
+FB & planes Cursor CRTC Encoders Bridges/Connectors
+ | | | | |
+ | | | | |
+ | .-------------. | .-----------. .-----------. |
+ +------------> |GDP | Main | | | +-> | | HDMI | <-+
+ +------------> |GDP v mixer|<+ | | | :===========: |
+ | |Cursor | | | +-> | | DVO | <-+
+ | ------- | COMPOSITOR | | |TV OUT | | :===========: |
+ | | | | | | | +-> | | HD analog | <-+
+ +-> | HQVDP | |VID Aux |<+ | | | :===========: |
+ | | | mixer| | +-> | | SD analog | <-+
+ '-------' '-------------' '-----------' '-----------'
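For quick reference, the section-2 mapping can also be read as a lookup table; the sketch below restates it in C with made-up identifiers (none of these are driver symbols):

#include <stdio.h>

enum hw_ip { HW_MIXER, HW_GDP, HW_HQVDP_VID, HW_CURSOR, HW_TVOUT, HW_HDMI };

static const struct { enum hw_ip ip; const char *drm_object; } map[] = {
    { HW_MIXER,     "CRTC"             },
    { HW_GDP,       "plane (graphics)" },
    { HW_HQVDP_VID, "plane (video)"    },
    { HW_CURSOR,    "cursor plane"     },
    { HW_TVOUT,     "encoder"          },
    { HW_HDMI,      "bridge/connector" },
};

int main(void)
{
    unsigned int i;

    for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
        printf("IP %d -> %s\n", map[i].ip, map[i].drm_object);
    return 0;
}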
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
new file mode 100644
index 000000000000..390d93e9a06c
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <[email protected]>
+ * Fabien Dessenne <[email protected]>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+
+#include "sti_compositor.h"
+#include "sti_drm_crtc.h"
+#include "sti_drm_drv.h"
+#include "sti_drm_plane.h"
+#include "sti_gdp.h"
+#include "sti_vtg.h"
+
+/*
+ * stiH407 compositor properties
+ */
+struct sti_compositor_data stih407_compositor_data = {
+ .nb_subdev = 6,
+ .subdev_desc = {
+ {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
+ {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
+ {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
+ {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
+ {STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
+ {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
+ },
+};
+
+/*
+ * stiH416 compositor properties
+ * Note:
+ * on stiH416, MIXER_AUX has a different base address from MIXER_MAIN.
+ * Moreover, GDPx differs between the Main and Aux Mixers, so this subdev
+ * map does not fit stiH416 if the MIXER_AUX is to be enabled.
+ */
+struct sti_compositor_data stih416_compositor_data = {
+ .nb_subdev = 3,
+ .subdev_desc = {
+ {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
+ {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
+ {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
+ },
+};
+
+static int sti_compositor_init_subdev(struct sti_compositor *compo,
+ struct sti_compositor_subdev_descriptor *desc,
+ unsigned int array_size)
+{
+ unsigned int i, mixer_id = 0, layer_id = 0;
+
+ for (i = 0; i < array_size; i++) {
+ switch (desc[i].type) {
+ case STI_MIXER_MAIN_SUBDEV:
+ case STI_MIXER_AUX_SUBDEV:
+ compo->mixer[mixer_id++] =
+ sti_mixer_create(compo->dev, desc[i].id,
+ compo->regs + desc[i].offset);
+ break;
+ case STI_GPD_SUBDEV:
+ case STI_VID_SUBDEV:
+ compo->layer[layer_id++] =
+ sti_layer_create(compo->dev, desc[i].id,
+ compo->regs + desc[i].offset);
+ break;
+ /* case STI_CURSOR_SUBDEV : TODO */
+ default:
+ DRM_ERROR("Unknow subdev compoment type\n");
+ return 1;
+ }
+
+ }
+ compo->nb_mixers = mixer_id;
+ compo->nb_layers = layer_id;
+
+ return 0;
+}
+
+static int sti_compositor_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ unsigned int i, crtc = 0, plane = 0;
+ struct sti_drm_private *dev_priv = drm_dev->dev_private;
+ struct drm_plane *cursor = NULL;
+ struct drm_plane *primary = NULL;
+
+ dev_priv->compo = compo;
+
+ for (i = 0; i < compo->nb_layers; i++) {
+ if (compo->layer[i]) {
+ enum sti_layer_desc desc = compo->layer[i]->desc;
+ enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
+ enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
+
+ if (compo->mixer[crtc])
+ plane_type = DRM_PLANE_TYPE_PRIMARY;
+
+ switch (type) {
+ case STI_CUR:
+ cursor = sti_drm_plane_init(drm_dev,
+ compo->layer[i],
+ (1 << crtc) - 1,
+ DRM_PLANE_TYPE_CURSOR);
+ break;
+ case STI_GDP:
+ case STI_VID:
+ primary = sti_drm_plane_init(drm_dev,
+ compo->layer[i],
+ (1 << crtc) - 1, plane_type);
+ plane++;
+ break;
+ case STI_BCK:
+ break;
+ }
+
+ /* The first planes are reserved for primary planes */
+ if (compo->mixer[crtc]) {
+ sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
+ primary, cursor);
+ crtc++;
+ cursor = NULL;
+ }
+ }
+ }
+
+ drm_vblank_init(drm_dev, crtc);
+ /* Allow usage of vblank without having to call drm_irq_install */
+ drm_dev->irq_enabled = 1;
+
+ DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n",
+ crtc, plane);
+ DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n");
+
+ return 0;
+}
+
+static void sti_compositor_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_compositor_ops = {
+ .bind = sti_compositor_bind,
+ .unbind = sti_compositor_unbind,
+};
+
+static const struct of_device_id compositor_of_match[] = {
+ {
+ .compatible = "st,stih416-compositor",
+ .data = &stih416_compositor_data,
+ }, {
+ .compatible = "st,stih407-compositor",
+ .data = &stih407_compositor_data,
+ }, {
+ /* end node */
+ }
+};
+MODULE_DEVICE_TABLE(of, compositor_of_match);
+
+static int sti_compositor_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *vtg_np;
+ struct sti_compositor *compo;
+ struct resource *res;
+ int err;
+
+ compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
+ if (!compo) {
+ DRM_ERROR("Failed to allocate compositor context\n");
+ return -ENOMEM;
+ }
+ compo->dev = dev;
+ compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb;
+
+ /* populate data structure depending on compatibility */
+ BUG_ON(!of_match_node(compositor_of_match, np)->data);
+
+ memcpy(&compo->data, of_match_node(compositor_of_match, np)->data,
+ sizeof(struct sti_compositor_data));
+
+ /* Get memory resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ DRM_ERROR("Get memory resource failed\n");
+ return -ENXIO;
+ }
+ compo->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (compo->regs == NULL) {
+ DRM_ERROR("Register mapping failed\n");
+ return -ENXIO;
+ }
+
+ /* Get clock resources */
+ compo->clk_compo_main = devm_clk_get(dev, "compo_main");
+ if (IS_ERR(compo->clk_compo_main)) {
+ DRM_ERROR("Cannot get compo_main clock\n");
+ return PTR_ERR(compo->clk_compo_main);
+ }
+
+ compo->clk_compo_aux = devm_clk_get(dev, "compo_aux");
+ if (IS_ERR(compo->clk_compo_aux)) {
+ DRM_ERROR("Cannot get compo_aux clock\n");
+ return PTR_ERR(compo->clk_compo_aux);
+ }
+
+ compo->clk_pix_main = devm_clk_get(dev, "pix_main");
+ if (IS_ERR(compo->clk_pix_main)) {
+ DRM_ERROR("Cannot get pix_main clock\n");
+ return PTR_ERR(compo->clk_pix_main);
+ }
+
+ compo->clk_pix_aux = devm_clk_get(dev, "pix_aux");
+ if (IS_ERR(compo->clk_pix_aux)) {
+ DRM_ERROR("Cannot get pix_aux clock\n");
+ return PTR_ERR(compo->clk_pix_aux);
+ }
+
+ /* Get reset resources */
+ compo->rst_main = devm_reset_control_get(dev, "compo-main");
+ /* Take compo main out of reset */
+ if (!IS_ERR(compo->rst_main))
+ reset_control_deassert(compo->rst_main);
+
+ compo->rst_aux = devm_reset_control_get(dev, "compo-aux");
+ /* Take compo aux out of reset */
+ if (!IS_ERR(compo->rst_aux))
+ reset_control_deassert(compo->rst_aux);
+
+ vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
+ if (vtg_np)
+ compo->vtg_main = of_vtg_find(vtg_np);
+
+ vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
+ if (vtg_np)
+ compo->vtg_aux = of_vtg_find(vtg_np);
+
+ /* Initialize compositor subdevices */
+ err = sti_compositor_init_subdev(compo, compo->data.subdev_desc,
+ compo->data.nb_subdev);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, compo);
+
+ return component_add(&pdev->dev, &sti_compositor_ops);
+}
+
+static int sti_compositor_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sti_compositor_ops);
+ return 0;
+}
+
+static struct platform_driver sti_compositor_driver = {
+ .driver = {
+ .name = "sti-compositor",
+ .owner = THIS_MODULE,
+ .of_match_table = compositor_of_match,
+ },
+ .probe = sti_compositor_probe,
+ .remove = sti_compositor_remove,
+};
+
+module_platform_driver(sti_compositor_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
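The per-SoC sti_compositor_data tables drive sti_compositor_init_subdev(): each descriptor selects a creation path and a register offset. A standalone model of that walk for the stih407-like layout, with simplified types:

#include <stdio.h>

enum subdev_type { MIXER_SUBDEV, GDP_SUBDEV, VID_SUBDEV };

struct subdev_desc { enum subdev_type type; unsigned int offset; };

int main(void)
{
    /* stih407-like layout: four GDPs, one VID, one main mixer. */
    static const struct subdev_desc desc[] = {
        { GDP_SUBDEV, 0x100 }, { GDP_SUBDEV, 0x200 },
        { GDP_SUBDEV, 0x300 }, { GDP_SUBDEV, 0x400 },
        { VID_SUBDEV, 0x700 }, { MIXER_SUBDEV, 0xC00 },
    };
    unsigned int i, mixers = 0, layers = 0;

    for (i = 0; i < sizeof(desc) / sizeof(desc[0]); i++) {
        switch (desc[i].type) {
        case MIXER_SUBDEV:
            mixers++;          /* sti_mixer_create(regs + offset) */
            break;
        case GDP_SUBDEV:
        case VID_SUBDEV:
            layers++;          /* sti_layer_create(regs + offset) */
            break;
        }
    }
    printf("%u mixers, %u layers\n", mixers, layers);   /* 1, 5 */
    return 0;
}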
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
new file mode 100644
index 000000000000..3ea19db72e0f
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <[email protected]>
+ * Fabien Dessenne <[email protected]>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_COMPOSITOR_H_
+#define _STI_COMPOSITOR_H_
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+
+#include "sti_layer.h"
+#include "sti_mixer.h"
+
+#define WAIT_NEXT_VSYNC_MS 50 /*ms*/
+
+#define STI_MAX_LAYER 8
+#define STI_MAX_MIXER 2
+
+enum sti_compositor_subdev_type {
+ STI_MIXER_MAIN_SUBDEV,
+ STI_MIXER_AUX_SUBDEV,
+ STI_GPD_SUBDEV,
+ STI_VID_SUBDEV,
+ STI_CURSOR_SUBDEV,
+};
+
+struct sti_compositor_subdev_descriptor {
+ enum sti_compositor_subdev_type type;
+ int id;
+ unsigned int offset;
+};
+
+/**
+ * STI Compositor data structure
+ *
+ * @nb_subdev: number of subdevices supported by the compositor
+ * @subdev_desc: subdev list description
+ */
+#define MAX_SUBDEV 9
+struct sti_compositor_data {
+ unsigned int nb_subdev;
+ struct sti_compositor_subdev_descriptor subdev_desc[MAX_SUBDEV];
+};
+
+/**
+ * STI Compositor structure
+ *
+ * @dev: driver device
+ * @regs: registers (main)
+ * @data: device data
+ * @clk_compo_main: clock for main compo
+ * @clk_compo_aux: clock for aux compo
+ * @clk_pix_main: pixel clock for main path
+ * @clk_pix_aux: pixel clock for aux path
+ * @rst_main: reset control of the main path
+ * @rst_aux: reset control of the aux path
+ * @mixer: array of mixers
+ * @vtg_main: vtg for main data path
+ * @vtg_aux: vtg for auxiliary data path
+ * @layer: array of layers
+ * @nb_mixers: number of mixers for this compositor
+ * @nb_layers: number of layers (GDP,VID,...) for this compositor
+ * @enable: true if the compositor is enabled, false otherwise
+ * @vtg_vblank_nb: callback for VTG VSYNC notification
+ */
+struct sti_compositor {
+ struct device *dev;
+ void __iomem *regs;
+ struct sti_compositor_data data;
+ struct clk *clk_compo_main;
+ struct clk *clk_compo_aux;
+ struct clk *clk_pix_main;
+ struct clk *clk_pix_aux;
+ struct reset_control *rst_main;
+ struct reset_control *rst_aux;
+ struct sti_mixer *mixer[STI_MAX_MIXER];
+ struct sti_vtg *vtg_main;
+ struct sti_vtg *vtg_aux;
+ struct sti_layer *layer[STI_MAX_LAYER];
+ int nb_mixers;
+ int nb_layers;
+ bool enable;
+ struct notifier_block vtg_vblank_nb;
+};
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
new file mode 100644
index 000000000000..d2ae0c0e13be
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <[email protected]>
+ * Fabien Dessenne <[email protected]>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "sti_compositor.h"
+#include "sti_drm_drv.h"
+#include "sti_drm_crtc.h"
+#include "sti_vtg.h"
+
+static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ DRM_DEBUG_KMS("\n");
+}
+
+static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct device *dev = mixer->dev;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+
+ compo->enable = true;
+
+ /* Prepare and enable the compo IP clock */
+ if (mixer->id == STI_MIXER_MAIN) {
+ if (clk_prepare_enable(compo->clk_compo_main))
+ DRM_INFO("Failed to prepare/enable compo_main clk\n");
+ } else {
+ if (clk_prepare_enable(compo->clk_compo_aux))
+ DRM_INFO("Failed to prepare/enable compo_aux clk\n");
+ }
+}
+
+static void sti_drm_crtc_commit(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct device *dev = mixer->dev;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct sti_layer *layer;
+
+ if (!mixer || !compo) {
+ DRM_ERROR("Cannot find mixer or compositor\n");
+ return;
+ }
+
+ /* get GDP which is reserved to the CRTC FB */
+ layer = to_sti_layer(crtc->primary);
+ if (layer)
+ sti_layer_commit(layer);
+ else
+ DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n");
+
+ /* Enable layer on mixer */
+ if (sti_mixer_set_layer_status(mixer, layer, true))
+ DRM_ERROR("Can not enable layer at mixer\n");
+}
+
+static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* accept the provided drm_display_mode, do not fix it up */
+ return true;
+}
+
+static int
+sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct device *dev = mixer->dev;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct sti_layer *layer;
+ struct clk *clk;
+ int rate = mode->clock * 1000;
+ int res;
+ unsigned int w, h;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d mode:%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ crtc->primary->fb->base.id, mode->base.id, mode->name);
+
+ DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
+ mode->vrefresh, mode->clock,
+ mode->hdisplay,
+ mode->hsync_start, mode->hsync_end,
+ mode->htotal,
+ mode->vdisplay,
+ mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->type, mode->flags);
+
+ /* Set rate and prepare/enable pixel clock */
+ if (mixer->id == STI_MIXER_MAIN)
+ clk = compo->clk_pix_main;
+ else
+ clk = compo->clk_pix_aux;
+
+ res = clk_set_rate(clk, rate);
+ if (res < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
+ return -EINVAL;
+ }
+ if (clk_prepare_enable(clk)) {
+ DRM_ERROR("Failed to prepare/enable pix clk\n");
+ return -EINVAL;
+ }
+
+ sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux, &crtc->mode);
+
+ /* a GDP is reserved to the CRTC FB */
+ layer = to_sti_layer(crtc->primary);
+ if (!layer) {
+ DRM_ERROR("Can not find GDP0)\n");
+ return -EINVAL;
+ }
+
+ /* copy the mode data adjusted by mode_fixup() into crtc->mode
+ * so that hardware can be set to proper mode
+ */
+ memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
+
+ res = sti_mixer_set_layer_depth(mixer, layer);
+ if (res) {
+ DRM_ERROR("Can not set layer depth\n");
+ return -EINVAL;
+ }
+ res = sti_mixer_active_video_area(mixer, &crtc->mode);
+ if (res) {
+ DRM_ERROR("Can not set active video area\n");
+ return -EINVAL;
+ }
+
+ w = crtc->primary->fb->width - x;
+ h = crtc->primary->fb->height - y;
+
+ return sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
+ mixer->id, 0, 0, w, h, x, y, w, h);
+}
+
+static int sti_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct sti_layer *layer;
+ unsigned int w, h;
+ int ret;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d (%d,%d)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ crtc->primary->fb->base.id, x, y);
+
+ /* GDP is reserved to the CRTC FB */
+ layer = to_sti_layer(crtc->primary);
+ if (!layer) {
+ DRM_ERROR("Can not find GDP0)\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ w = crtc->primary->fb->width - crtc->x;
+ h = crtc->primary->fb->height - crtc->y;
+
+ ret = sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
+ mixer->id, 0, 0, w, h,
+ crtc->x, crtc->y, w, h);
+ if (ret) {
+ DRM_ERROR("Can not prepare layer\n");
+ goto out;
+ }
+
+ sti_drm_crtc_commit(crtc);
+out:
+ return ret;
+}
+
+static void sti_drm_crtc_load_lut(struct drm_crtc *crtc)
+{
+ /* do nothing */
+}
+
+static void sti_drm_crtc_disable(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct device *dev = mixer->dev;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct sti_layer *layer;
+
+ if (!compo->enable)
+ return;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
+
+ /* Disable Background */
+ sti_mixer_set_background_status(mixer, false);
+
+ /* Disable GDP */
+ layer = to_sti_layer(crtc->primary);
+ if (!layer) {
+ DRM_ERROR("Cannot find GDP0\n");
+ return;
+ }
+
+ /* Disable layer at mixer level */
+ if (sti_mixer_set_layer_status(mixer, layer, false))
+ DRM_ERROR("Can not disable %s layer at mixer\n",
+ sti_layer_to_str(layer));
+
+ /* Wait a while to be sure that a Vsync event is received */
+ msleep(WAIT_NEXT_VSYNC_MS);
+
+ /* Then disable layer itself */
+ sti_layer_disable(layer);
+
+ drm_vblank_off(crtc->dev, mixer->id);
+
+ /* Disable pixel clock and compo IP clocks */
+ if (mixer->id == STI_MIXER_MAIN) {
+ clk_disable_unprepare(compo->clk_pix_main);
+ clk_disable_unprepare(compo->clk_compo_main);
+ } else {
+ clk_disable_unprepare(compo->clk_pix_aux);
+ clk_disable_unprepare(compo->clk_compo_aux);
+ }
+
+ compo->enable = false;
+}
+
+static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
+ .dpms = sti_drm_crtc_dpms,
+ .prepare = sti_drm_crtc_prepare,
+ .commit = sti_drm_crtc_commit,
+ .mode_fixup = sti_drm_crtc_mode_fixup,
+ .mode_set = sti_drm_crtc_mode_set,
+ .mode_set_base = sti_drm_crtc_mode_set_base,
+ .load_lut = sti_drm_crtc_load_lut,
+ .disable = sti_drm_crtc_disable,
+};
+
+static int sti_drm_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct drm_device *drm_dev = crtc->dev;
+ struct drm_framebuffer *old_fb;
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ unsigned long flags;
+ int ret;
+
+ DRM_DEBUG_KMS("fb %d --> fb %d\n",
+ crtc->primary->fb->base.id, fb->base.id);
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ old_fb = crtc->primary->fb;
+ crtc->primary->fb = fb;
+ ret = sti_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+ if (ret) {
+ DRM_ERROR("failed\n");
+ crtc->primary->fb = old_fb;
+ goto out;
+ }
+
+ if (event) {
+ event->pipe = mixer->id;
+
+ ret = drm_vblank_get(drm_dev, event->pipe);
+ if (ret) {
+ DRM_ERROR("Cannot get vblank\n");
+ goto out;
+ }
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ if (mixer->pending_event) {
+ drm_vblank_put(drm_dev, event->pipe);
+ ret = -EBUSY;
+ } else {
+ mixer->pending_event = event;
+ }
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ }
+out:
+ mutex_unlock(&drm_dev->struct_mutex);
+ return ret;
+}
+
+static void sti_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+ DRM_DEBUG_KMS("\n");
+ drm_crtc_cleanup(crtc);
+}
+
+static int sti_drm_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val)
+{
+ DRM_DEBUG_KMS("\n");
+ return 0;
+}
+
+int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct drm_device *drm_dev;
+ struct sti_compositor *compo =
+ container_of(nb, struct sti_compositor, vtg_vblank_nb);
+ int *crtc = data;
+ unsigned long flags;
+ struct sti_drm_private *priv;
+
+ drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
+ priv = drm_dev->dev_private;
+
+ if ((event != VTG_TOP_FIELD_EVENT) &&
+ (event != VTG_BOTTOM_FIELD_EVENT)) {
+ DRM_ERROR("unknown event: %lu\n", event);
+ return -EINVAL;
+ }
+
+ drm_handle_vblank(drm_dev, *crtc);
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ if (compo->mixer[*crtc]->pending_event) {
+ drm_send_vblank_event(drm_dev, -1,
+ compo->mixer[*crtc]->pending_event);
+ drm_vblank_put(drm_dev, *crtc);
+ compo->mixer[*crtc]->pending_event = NULL;
+ }
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ return 0;
+}
+
+int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
+{
+ struct sti_drm_private *dev_priv = dev->dev_private;
+ struct sti_compositor *compo = dev_priv->compo;
+ struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+
+ if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux,
+ vtg_vblank_nb, crtc)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sti_drm_crtc_enable_vblank);
+
+void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
+{
+ struct sti_drm_private *priv = dev->dev_private;
+ struct sti_compositor *compo = priv->compo;
+ struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+ unsigned long flags;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (sti_vtg_unregister_client(crtc == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux, vtg_vblank_nb))
+ DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
+
+ /* free the resources of the pending requests */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (compo->mixer[crtc]->pending_event) {
+ drm_vblank_put(dev, crtc);
+ compo->mixer[crtc]->pending_event = NULL;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+}
+EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
+
+static struct drm_crtc_funcs sti_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = sti_drm_crtc_page_flip,
+ .destroy = sti_drm_crtc_destroy,
+ .set_property = sti_drm_crtc_set_property,
+};
+
+bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+
+ if (mixer->id == STI_MIXER_MAIN)
+ return true;
+
+ return false;
+}
+
+int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+ struct drm_plane *primary, struct drm_plane *cursor)
+{
+ struct drm_crtc *crtc = &mixer->drm_crtc;
+ int res;
+
+ res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
+ &sti_crtc_funcs);
+ if (res) {
+ DRM_ERROR("Can not initialze CRTC\n");
+ return -EINVAL;
+ }
+
+ drm_crtc_helper_add(crtc, &sti_crtc_helper_funcs);
+
+ DRM_DEBUG_DRIVER("drm CRTC:%d mapped to %s\n",
+ crtc->base.id, sti_mixer_to_str(mixer));
+
+ return 0;
+}
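The page-flip path and the VTG vblank callback above cooperate through a single pending_event slot guarded by drm_dev->event_lock: a second flip before the vsync returns -EBUSY, and the callback delivers and clears the slot. A userspace model of that handshake, with a pthread mutex standing in for the spinlock and hypothetical names (build with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pending_event;

static int queue_flip_event(void *event)
{
    int ret = 0;

    pthread_mutex_lock(&event_lock);
    if (pending_event)
        ret = -EBUSY;          /* a flip is already in flight */
    else
        pending_event = event;
    pthread_mutex_unlock(&event_lock);
    return ret;
}

static void vblank_cb(void)
{
    pthread_mutex_lock(&event_lock);
    if (pending_event) {
        /* drm_send_vblank_event() + drm_vblank_put() in the driver */
        printf("delivered event %p\n", pending_event);
        pending_event = NULL;
    }
    pthread_mutex_unlock(&event_lock);
}

int main(void)
{
    int dummy;

    printf("first queue: %d\n", queue_flip_event(&dummy));   /* 0 */
    printf("second queue: %d\n", queue_flip_event(&dummy));  /* -16 */
    vblank_cb();
    printf("requeue: %d\n", queue_flip_event(&dummy));       /* 0 */
    return 0;
}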
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.h b/drivers/gpu/drm/sti/sti_drm_crtc.h
new file mode 100644
index 000000000000..caca8b14f017
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_DRM_CRTC_H_
+#define _STI_DRM_CRTC_H_
+
+#include <drm/drmP.h>
+
+struct sti_mixer;
+
+int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+ struct drm_plane *primary, struct drm_plane *cursor);
+int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
+void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
+ unsigned long event, void *data);
+bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c
new file mode 100644
index 000000000000..a7cc24917a96
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_drv.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/component.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "sti_drm_drv.h"
+#include "sti_drm_crtc.h"
+
+#define DRIVER_NAME "sti"
+#define DRIVER_DESC "STMicroelectronics SoC DRM"
+#define DRIVER_DATE "20140601"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define STI_MAX_FB_HEIGHT 4096
+#define STI_MAX_FB_WIDTH 4096
+
+static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+};
+
+static void sti_drm_mode_config_init(struct drm_device *dev)
+{
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * Set the default maximum width and height; these values are used
+ * to check framebuffer size limitations in drm_mode_addfb().
+ */
+ dev->mode_config.max_width = STI_MAX_FB_WIDTH;
+ dev->mode_config.max_height = STI_MAX_FB_HEIGHT;
+
+ dev->mode_config.funcs = &sti_drm_mode_config_funcs;
+}
+
+static int sti_drm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct sti_drm_private *private;
+ int ret;
+
+ private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL);
+ if (!private) {
+ DRM_ERROR("Failed to allocate private\n");
+ return -ENOMEM;
+ }
+ dev->dev_private = (void *)private;
+ private->drm_dev = dev;
+
+ drm_mode_config_init(dev);
+ drm_kms_helper_poll_init(dev);
+
+ sti_drm_mode_config_init(dev);
+
+ ret = component_bind_all(dev->dev, dev);
+ if (ret)
+ return ret;
+
+ drm_helper_disable_unused_functions(dev);
+
+#ifdef CONFIG_DRM_STI_FBDEV
+ drm_fbdev_cma_init(dev, 32,
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
+#endif
+ return 0;
+}
+
+static const struct file_operations sti_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .release = drm_release,
+};
+
+static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags)
+{
+ /* we want to be able to write to the mmapped buffer */
+ flags |= O_RDWR;
+ return drm_gem_prime_export(dev, obj, flags);
+}
+
+static struct drm_driver sti_drm_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+ DRIVER_GEM | DRIVER_PRIME,
+ .load = sti_drm_load,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .fops = &sti_drm_driver_fops,
+
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = sti_drm_crtc_enable_vblank,
+ .disable_vblank = sti_drm_crtc_disable_vblank,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = sti_drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int sti_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&sti_drm_driver, to_platform_device(dev));
+}
+
+static void sti_drm_unbind(struct device *dev)
+{
+ drm_put_dev(dev_get_drvdata(dev));
+}
+
+static const struct component_master_ops sti_drm_ops = {
+ .bind = sti_drm_bind,
+ .unbind = sti_drm_unbind,
+};
+
+static int sti_drm_master_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct device_node *child_np;
+ struct component_match *match = NULL;
+
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+
+ child_np = of_get_next_available_child(node, NULL);
+
+ while (child_np) {
+ component_match_add(dev, &match, compare_of, child_np);
+ of_node_put(child_np);
+ child_np = of_get_next_available_child(node, child_np);
+ }
+
+ return component_master_add_with_match(dev, &sti_drm_ops, match);
+}
+
+static int sti_drm_master_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &sti_drm_ops);
+ return 0;
+}
+
+static struct platform_driver sti_drm_master_driver = {
+ .probe = sti_drm_master_probe,
+ .remove = sti_drm_master_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME "__master",
+ },
+};
+
+static int sti_drm_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct platform_device *master;
+
+ of_platform_populate(node, NULL, NULL, dev);
+
+ platform_driver_register(&sti_drm_master_driver);
+ master = platform_device_register_resndata(dev,
+ DRIVER_NAME "__master", -1,
+ NULL, 0, NULL, 0);
+ if (!master)
+ return -EINVAL;
+
+ platform_set_drvdata(pdev, master);
+ return 0;
+}
+
+static int sti_drm_platform_remove(struct platform_device *pdev)
+{
+ struct platform_device *master = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(&pdev->dev);
+ platform_device_unregister(master);
+ platform_driver_unregister(&sti_drm_master_driver);
+ return 0;
+}
+
+static const struct of_device_id sti_drm_dt_ids[] = {
+ { .compatible = "st,sti-display-subsystem", },
+ { /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, sti_drm_dt_ids);
+
+static struct platform_driver sti_drm_platform_driver = {
+ .probe = sti_drm_platform_probe,
+ .remove = sti_drm_platform_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = sti_drm_dt_ids,
+ },
+};
+
+module_platform_driver(sti_drm_platform_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drm_drv.h
new file mode 100644
index 000000000000..ec5e2eb8dff9
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_drv.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_DRM_DRV_H_
+#define _STI_DRM_DRV_H_
+
+#include <drm/drmP.h>
+
+struct sti_compositor;
+struct sti_tvout;
+
+/**
+ * STI drm private structure
+ * This structure is stored as private in the drm_device
+ *
+ * @compo: compositor
+ * @plane_zorder_property: z-order property for CRTC planes
+ * @drm_dev: drm device
+ */
+struct sti_drm_private {
+ struct sti_compositor *compo;
+ struct drm_property *plane_zorder_property;
+ struct drm_device *drm_dev;
+};
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
new file mode 100644
index 000000000000..f4118d4cac22
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_plane.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <[email protected]>
+ * Fabien Dessenne <[email protected]>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "sti_compositor.h"
+#include "sti_drm_drv.h"
+#include "sti_drm_plane.h"
+#include "sti_vtg.h"
+
+enum sti_layer_desc sti_layer_default_zorder[] = {
+ STI_GDP_0,
+ STI_VID_0,
+ STI_GDP_1,
+ STI_VID_1,
+ STI_GDP_2,
+ STI_GDP_3,
+};
+
+/* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */
+
+static int
+sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct sti_layer *layer = to_sti_layer(plane);
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ int res;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s) drm fb:%d\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ plane->base.id, sti_layer_to_str(layer), fb->base.id);
+ DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
+
+ res = sti_mixer_set_layer_depth(mixer, layer);
+ if (res) {
+ DRM_ERROR("Can not set layer depth\n");
+ return res;
+ }
+
+ /* src_x, src_y, src_w and src_h are in 16.16 fixed-point format. */
+ res = sti_layer_prepare(layer, fb, &crtc->mode, mixer->id,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x >> 16, src_y >> 16,
+ src_w >> 16, src_h >> 16);
+ if (res) {
+ DRM_ERROR("Layer prepare failed\n");
+ return res;
+ }
+
+ res = sti_layer_commit(layer);
+ if (res) {
+ DRM_ERROR("Layer commit failed\n");
+ return res;
+ }
+
+ res = sti_mixer_set_layer_status(mixer, layer, true);
+ if (res) {
+ DRM_ERROR("Can not enable layer at mixer\n");
+ return res;
+ }
+
+ return 0;
+}
+
+static int sti_drm_disable_plane(struct drm_plane *plane)
+{
+ struct sti_layer *layer;
+ struct sti_mixer *mixer;
+ int lay_res, mix_res;
+
+ if (!plane->crtc) {
+ DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id);
+ return 0;
+ }
+ layer = to_sti_layer(plane);
+ mixer = to_sti_mixer(plane->crtc);
+
+ DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+ plane->crtc->base.id, sti_mixer_to_str(mixer),
+ plane->base.id, sti_layer_to_str(layer));
+
+ /* Disable layer at mixer level */
+ mix_res = sti_mixer_set_layer_status(mixer, layer, false);
+ if (mix_res)
+ DRM_ERROR("Can not disable layer at mixer\n");
+
+ /* Wait a while to be sure that a Vsync event is received */
+ msleep(WAIT_NEXT_VSYNC_MS);
+
+ /* Then disable layer itself */
+ lay_res = sti_layer_disable(layer);
+ if (lay_res)
+ DRM_ERROR("Layer disable failed\n");
+
+ if (lay_res || mix_res)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void sti_drm_plane_destroy(struct drm_plane *plane)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ sti_drm_disable_plane(plane);
+ drm_plane_cleanup(plane);
+}
+
+static int sti_drm_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = plane->dev;
+ struct sti_drm_private *private = dev->dev_private;
+ struct sti_layer *layer = to_sti_layer(plane);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (property == private->plane_zorder_property) {
+ layer->zorder = val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct drm_plane_funcs sti_drm_plane_funcs = {
+ .update_plane = sti_drm_update_plane,
+ .disable_plane = sti_drm_disable_plane,
+ .destroy = sti_drm_plane_destroy,
+ .set_property = sti_drm_plane_set_property,
+};
+
+static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
+ uint64_t default_val)
+{
+ struct drm_device *dev = plane->dev;
+ struct sti_drm_private *private = dev->dev_private;
+ struct drm_property *prop;
+ struct sti_layer *layer = to_sti_layer(plane);
+
+ prop = private->plane_zorder_property;
+ if (!prop) {
+ prop = drm_property_create_range(dev, 0, "zpos", 0,
+ GAM_MIXER_NB_DEPTH_LEVEL - 1);
+ if (!prop)
+ return;
+
+ private->plane_zorder_property = prop;
+ }
+
+ drm_object_attach_property(&plane->base, prop, default_val);
+ layer->zorder = default_val;
+}
+
+struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
+ struct sti_layer *layer,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type)
+{
+ int err, i;
+ uint64_t default_zorder = 0;
+
+ err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs,
+ &sti_drm_plane_funcs,
+ sti_layer_get_formats(layer),
+ sti_layer_get_nb_formats(layer), type);
+ if (err) {
+ DRM_ERROR("Failed to initialize plane\n");
+ return NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
+ if (sti_layer_default_zorder[i] == layer->desc)
+ break;
+
+ default_zorder = i;
+
+ if (type == DRM_PLANE_TYPE_OVERLAY)
+ sti_drm_plane_attach_zorder_property(&layer->plane,
+ default_zorder);
+
+ DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n",
+ layer->plane.base.id,
+ sti_layer_to_str(layer), default_zorder);
+
+ return &layer->plane;
+}
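sti_drm_plane_init() derives each overlay's initial zpos from the layer's index in sti_layer_default_zorder[]. A standalone sketch of that lookup, with simplified enum values:

#include <stdio.h>

enum layer_desc { GDP_0, VID_0, GDP_1, VID_1, GDP_2, GDP_3 };

static const enum layer_desc default_zorder[] = {
    GDP_0, VID_0, GDP_1, VID_1, GDP_2, GDP_3,
};

static unsigned int default_zpos(enum layer_desc desc)
{
    unsigned int i;

    for (i = 0; i < sizeof(default_zorder) / sizeof(default_zorder[0]); i++)
        if (default_zorder[i] == desc)
            break;
    return i;   /* falls past the end for unknown layers, as in the driver */
}

int main(void)
{
    printf("GDP_2 default zpos: %u\n", default_zpos(GDP_2));   /* 4 */
    return 0;
}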
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h
new file mode 100644
index 000000000000..4f191839f2a7
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_plane.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_DRM_PLANE_H_
+#define _STI_DRM_PLANE_H_
+
+#include <drm/drmP.h>
+
+struct sti_layer;
+
+struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
+ struct sti_layer *layer,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type);
+#endif
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
new file mode 100644
index 000000000000..4e30b74559f5
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -0,0 +1,549 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <[email protected]>
+ * Fabien Dessenne <[email protected]>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+
+#include "sti_compositor.h"
+#include "sti_gdp.h"
+#include "sti_layer.h"
+#include "sti_vtg.h"
+
+#define ENA_COLOR_FILL BIT(8)
+#define WAIT_NEXT_VSYNC BIT(31)
+
+/* GDP color formats */
+#define GDP_RGB565 0x00
+#define GDP_RGB888 0x01
+#define GDP_RGB888_32 0x02
+#define GDP_ARGB8565 0x04
+#define GDP_ARGB8888 0x05
+#define GDP_ARGB1555 0x06
+#define GDP_ARGB4444 0x07
+#define GDP_CLUT8 0x0B
+#define GDP_YCBR888 0x10
+#define GDP_YCBR422R 0x12
+#define GDP_AYCBR8888 0x15
+
+#define GAM_GDP_CTL_OFFSET 0x00
+#define GAM_GDP_AGC_OFFSET 0x04
+#define GAM_GDP_VPO_OFFSET 0x0C
+#define GAM_GDP_VPS_OFFSET 0x10
+#define GAM_GDP_PML_OFFSET 0x14
+#define GAM_GDP_PMP_OFFSET 0x18
+#define GAM_GDP_SIZE_OFFSET 0x1C
+#define GAM_GDP_NVN_OFFSET 0x24
+#define GAM_GDP_KEY1_OFFSET 0x28
+#define GAM_GDP_KEY2_OFFSET 0x2C
+#define GAM_GDP_PPT_OFFSET 0x34
+#define GAM_GDP_CML_OFFSET 0x3C
+#define GAM_GDP_MST_OFFSET 0x68
+
+#define GAM_GDP_ALPHARANGE_255 BIT(5)
+#define GAM_GDP_AGC_FULL_RANGE 0x00808080
+#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
+#define GAM_GDP_SIZE_MAX 0x7FF
+
+#define GDP_NODE_NB_BANK 2
+#define GDP_NODE_PER_FIELD 2
+
+struct sti_gdp_node {
+ u32 gam_gdp_ctl;
+ u32 gam_gdp_agc;
+ u32 reserved1;
+ u32 gam_gdp_vpo;
+ u32 gam_gdp_vps;
+ u32 gam_gdp_pml;
+ u32 gam_gdp_pmp;
+ u32 gam_gdp_size;
+ u32 reserved2;
+ u32 gam_gdp_nvn;
+ u32 gam_gdp_key1;
+ u32 gam_gdp_key2;
+ u32 reserved3;
+ u32 gam_gdp_ppt;
+ u32 reserved4;
+ u32 gam_gdp_cml;
+};
+
+struct sti_gdp_node_list {
+ struct sti_gdp_node *top_field;
+ struct sti_gdp_node *btm_field;
+};
+
+/**
+ * STI GDP structure
+ *
+ * @layer: layer structure
+ * @clk_pix: pixel clock for the current gdp
+ * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
+ * @is_curr_top: true if the current node processed is the top field
+ * @node_list: array of node list
+ */
+struct sti_gdp {
+ struct sti_layer layer;
+ struct clk *clk_pix;
+ struct notifier_block vtg_field_nb;
+ bool is_curr_top;
+ struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
+};
+
+#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)
+
+static const uint32_t gdp_supported_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_AYUV,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_C8,
+};
+
+static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
+{
+ return gdp_supported_formats;
+}
+
+static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
+{
+ return ARRAY_SIZE(gdp_supported_formats);
+}
+
+static int sti_gdp_fourcc2format(int fourcc)
+{
+ switch (fourcc) {
+ case DRM_FORMAT_XRGB8888:
+ return GDP_RGB888_32;
+ case DRM_FORMAT_ARGB8888:
+ return GDP_ARGB8888;
+ case DRM_FORMAT_ARGB4444:
+ return GDP_ARGB4444;
+ case DRM_FORMAT_ARGB1555:
+ return GDP_ARGB1555;
+ case DRM_FORMAT_RGB565:
+ return GDP_RGB565;
+ case DRM_FORMAT_RGB888:
+ return GDP_RGB888;
+ case DRM_FORMAT_AYUV:
+ return GDP_AYCBR8888;
+ case DRM_FORMAT_YUV444:
+ return GDP_YCBR888;
+ case DRM_FORMAT_VYUY:
+ return GDP_YCBR422R;
+ case DRM_FORMAT_C8:
+ return GDP_CLUT8;
+ }
+ return -1;
+}
+
+static int sti_gdp_get_alpharange(int format)
+{
+ switch (format) {
+ case GDP_ARGB8565:
+ case GDP_ARGB8888:
+ case GDP_AYCBR8888:
+ return GAM_GDP_ALPHARANGE_255;
+ }
+ return 0;
+}
+
+/**
+ * sti_gdp_get_free_nodes
+ * @layer: gdp layer
+ *
+ * Look for a GDP node list that is not currently read by the HW.
+ *
+ * RETURNS:
+ * Pointer to the free GDP node list
+ */
+static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
+{
+ int hw_nvn;
+ void *virt_nvn;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ unsigned int i;
+
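+	/* The NVN register holds the DMA address of the node currently
+	 * pointed to by the HW */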
+ hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+ if (!hw_nvn)
+ goto end;
+
+ virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
+
+ for (i = 0; i < GDP_NODE_NB_BANK; i++)
+ if ((virt_nvn != gdp->node_list[i].btm_field) &&
+ (virt_nvn != gdp->node_list[i].top_field))
+ return &gdp->node_list[i];
+
+	/* in hazardous cases, restart with the first node */
+ DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
+ sti_layer_to_str(layer), hw_nvn);
+
+end:
+ return &gdp->node_list[0];
+}
+
+/**
+ * sti_gdp_get_current_nodes
+ * @layer: GDP layer
+ *
+ * Look for GDP nodes that are currently read by the HW.
+ *
+ * RETURNS:
+ * Pointer to the current GDP node list
+ */
+static
+struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
+{
+ int hw_nvn;
+ void *virt_nvn;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ unsigned int i;
+
+ hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+ if (!hw_nvn)
+ goto end;
+
+ virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
+
+ for (i = 0; i < GDP_NODE_NB_BANK; i++)
+ if ((virt_nvn == gdp->node_list[i].btm_field) ||
+ (virt_nvn == gdp->node_list[i].top_field))
+ return &gdp->node_list[i];
+
+end:
+ DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
+ hw_nvn, sti_layer_to_str(layer));
+
+ return NULL;
+}
+
+/**
+ * sti_gdp_prepare_layer
+ * @layer: gdp layer
+ * @first_prepare: true if it is the first time this function is called
+ *
+ * Update the free GDP node list according to the layer properties.
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
+{
+ struct sti_gdp_node_list *list;
+ struct sti_gdp_node *top_field, *btm_field;
+ struct drm_display_mode *mode = layer->mode;
+ struct device *dev = layer->dev;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ int format;
+ unsigned int depth, bpp;
+ int rate = mode->clock * 1000;
+ int res;
+ u32 ydo, xdo, yds, xds;
+
+ list = sti_gdp_get_free_nodes(layer);
+ top_field = list->top_field;
+ btm_field = list->btm_field;
+
+ dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
+ sti_layer_to_str(layer), top_field, btm_field);
+
+ /* Build the top field from layer params */
+ top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
+ top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
+ format = sti_gdp_fourcc2format(layer->format);
+ if (format == -1) {
+ DRM_ERROR("Format not supported by GDP %.4s\n",
+ (char *)&layer->format);
+ return 1;
+ }
+ top_field->gam_gdp_ctl |= format;
+ top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
+ top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
+
+ /* pixel memory location */
+ drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
+ top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
+ top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
+ top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];
+
+ /* input parameters */
+ top_field->gam_gdp_pmp = layer->pitches[0];
+ top_field->gam_gdp_size =
+ clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
+ clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);
+
+ /* output parameters */
+ ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
+ yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
+ xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
+ top_field->gam_gdp_vpo = (ydo << 16) | xdo;
+ top_field->gam_gdp_vps = (yds << 16) | xds;
+
+ /* Same content and chained together */
+ memcpy(btm_field, top_field, sizeof(*btm_field));
+ top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field);
+ btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field);
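+	/* The two fields point at each other through their NVN fields,
+	 * so the HW keeps looping on this content until the driver
+	 * retargets NVN in the commit step */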
+
+ /* Interlaced mode */
+ if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
+ btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
+ layer->pitches[0];
+
+ if (first_prepare) {
+ /* Register gdp callback */
+ if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux,
+ &gdp->vtg_field_nb, layer->mixer_id)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return 1;
+ }
+
+ /* Set and enable gdp clock */
+ if (gdp->clk_pix) {
+ res = clk_set_rate(gdp->clk_pix, rate);
+ if (res < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
+ rate);
+ return 1;
+ }
+
+ if (clk_prepare_enable(gdp->clk_pix)) {
+ DRM_ERROR("Failed to prepare/enable gdp\n");
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * sti_gdp_commit_layer
+ * @layer: gdp layer
+ *
+ * Update the NVN field of the 'right' field of the current GDP node (being
+ * used by the HW) with the address of the updated ('free') top field GDP node.
+ * - In interlaced mode the 'right' field is the bottom field as we update
+ * frames starting from their top field
+ * - In progressive mode, we update both bottom and top fields which are
+ * equal nodes.
+ * At the next VSYNC, the updated node list will be used by the HW.
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+static int sti_gdp_commit_layer(struct sti_layer *layer)
+{
+ struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
+ struct sti_gdp_node *updated_top_node = updated_list->top_field;
+ struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node);
+ u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node);
+ struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
+
+ dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
+ sti_layer_to_str(layer),
+ updated_top_node, updated_btm_node);
+ dev_dbg(layer->dev, "Current NVN:0x%X\n",
+ readl(layer->regs + GAM_GDP_NVN_OFFSET));
+ dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
+ (unsigned long)layer->paddr,
+ readl(layer->regs + GAM_GDP_PML_OFFSET));
+
+ if (curr_list == NULL) {
+		/* First update, or invalid node: write directly to the
+		 * hw register */
+		DRM_DEBUG_DRIVER("%s first update (or invalid node)\n",
+				sti_layer_to_str(layer));
+
+		writel(gdp->is_curr_top ?
+ dma_updated_btm : dma_updated_top,
+ layer->regs + GAM_GDP_NVN_OFFSET);
+ return 0;
+ }
+
+ if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		if (gdp->is_curr_top) {
+			/* Do not update in the middle of the frame:
+			 * postpone the update until the bottom field
+			 * has been displayed */
+ curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
+ } else {
+ /* Direct update to avoid one frame delay */
+ writel(dma_updated_top,
+ layer->regs + GAM_GDP_NVN_OFFSET);
+ }
+ } else {
+ /* Direct update for progressive to avoid one frame delay */
+ writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
+ }
+
+ return 0;
+}
+
+/**
+ * sti_gdp_disable_layer
+ * @layer: gdp layer
+ *
+ * Disable a GDP.
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+static int sti_gdp_disable_layer(struct sti_layer *layer)
+{
+ unsigned int i;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ struct sti_compositor *compo = dev_get_drvdata(layer->dev);
+
+ DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+
+ /* Set the nodes as 'to be ignored on mixer' */
+ for (i = 0; i < GDP_NODE_NB_BANK; i++) {
+ gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
+ gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
+ }
+
+ if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
+ DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
+
+ if (gdp->clk_pix)
+ clk_disable_unprepare(gdp->clk_pix);
+
+ return 0;
+}
+
+/**
+ * sti_gdp_field_cb
+ * @nb: notifier block
+ * @event: event message
+ * @data: private data
+ *
+ * Handle VTG top field and bottom field event.
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+int sti_gdp_field_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
+
+ switch (event) {
+ case VTG_TOP_FIELD_EVENT:
+ gdp->is_curr_top = true;
+ break;
+ case VTG_BOTTOM_FIELD_EVENT:
+ gdp->is_curr_top = false;
+ break;
+ default:
+ DRM_ERROR("unsupported event: %lu\n", event);
+ break;
+ }
+
+ return 0;
+}
+
+static void sti_gdp_init(struct sti_layer *layer)
+{
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ struct device_node *np = layer->dev->of_node;
+ dma_addr_t dma;
+ void *base;
+ unsigned int i, size;
+
+ /* Allocate all the nodes within a single memory page */
+ size = sizeof(struct sti_gdp_node) *
+ GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
+
+ base = dma_alloc_writecombine(layer->dev,
+ size, &dma, GFP_KERNEL | GFP_DMA);
+ if (!base) {
+ DRM_ERROR("Failed to allocate memory for GDP node\n");
+ return;
+ }
+ memset(base, 0, size);
+
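+	/* Nodes are fetched by DMA, so each node address must be
+	 * 16-byte aligned, hence the checks below */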
+ for (i = 0; i < GDP_NODE_NB_BANK; i++) {
+ if (virt_to_dma(layer->dev, base) & 0xF) {
+ DRM_ERROR("Mem alignment failed\n");
+ return;
+ }
+ gdp->node_list[i].top_field = base;
+ DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
+ base += sizeof(struct sti_gdp_node);
+
+ if (virt_to_dma(layer->dev, base) & 0xF) {
+ DRM_ERROR("Mem alignment failed\n");
+ return;
+ }
+ gdp->node_list[i].btm_field = base;
+ DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
+ base += sizeof(struct sti_gdp_node);
+ }
+
+ if (of_device_is_compatible(np, "st,stih407-compositor")) {
+		/* Each GDP of the STiH407 chip has its own pixel clock */
+ char *clk_name;
+
+ switch (layer->desc) {
+ case STI_GDP_0:
+ clk_name = "pix_gdp1";
+ break;
+ case STI_GDP_1:
+ clk_name = "pix_gdp2";
+ break;
+ case STI_GDP_2:
+ clk_name = "pix_gdp3";
+ break;
+ case STI_GDP_3:
+ clk_name = "pix_gdp4";
+ break;
+ default:
+ DRM_ERROR("GDP id not recognized\n");
+ return;
+ }
+
+ gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
+ if (IS_ERR(gdp->clk_pix))
+ DRM_ERROR("Cannot get %s clock\n", clk_name);
+ }
+}
+
+static const struct sti_layer_funcs gdp_ops = {
+ .get_formats = sti_gdp_get_formats,
+ .get_nb_formats = sti_gdp_get_nb_formats,
+ .init = sti_gdp_init,
+ .prepare = sti_gdp_prepare_layer,
+ .commit = sti_gdp_commit_layer,
+ .disable = sti_gdp_disable_layer,
+};
+
+struct sti_layer *sti_gdp_create(struct device *dev, int id)
+{
+ struct sti_gdp *gdp;
+
+ gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
+ if (!gdp) {
+ DRM_ERROR("Failed to allocate memory for GDP\n");
+ return NULL;
+ }
+
+ gdp->layer.ops = &gdp_ops;
+ gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
+
+ return (struct sti_layer *)gdp;
+}
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
new file mode 100644
index 000000000000..1dab68274ad3
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <[email protected]>
+ * Fabien Dessenne <[email protected]>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_GDP_H_
+#define _STI_GDP_H_
+
+#include <linux/types.h>
+
+struct sti_layer *sti_gdp_create(struct device *dev, int id);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
new file mode 100644
index 000000000000..72d957f81c05
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -0,0 +1,794 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Fabien Dessenne <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+/* HDformatter registers */
+#define HDA_ANA_CFG 0x0000
+#define HDA_ANA_SCALE_CTRL_Y 0x0004
+#define HDA_ANA_SCALE_CTRL_CB 0x0008
+#define HDA_ANA_SCALE_CTRL_CR 0x000C
+#define HDA_ANA_ANC_CTRL 0x0010
+#define HDA_ANA_SRC_Y_CFG 0x0014
+#define HDA_COEFF_Y_PH1_TAP123 0x0018
+#define HDA_COEFF_Y_PH1_TAP456 0x001C
+#define HDA_COEFF_Y_PH2_TAP123 0x0020
+#define HDA_COEFF_Y_PH2_TAP456 0x0024
+#define HDA_COEFF_Y_PH3_TAP123 0x0028
+#define HDA_COEFF_Y_PH3_TAP456 0x002C
+#define HDA_COEFF_Y_PH4_TAP123 0x0030
+#define HDA_COEFF_Y_PH4_TAP456 0x0034
+#define HDA_ANA_SRC_C_CFG 0x0040
+#define HDA_COEFF_C_PH1_TAP123 0x0044
+#define HDA_COEFF_C_PH1_TAP456 0x0048
+#define HDA_COEFF_C_PH2_TAP123 0x004C
+#define HDA_COEFF_C_PH2_TAP456 0x0050
+#define HDA_COEFF_C_PH3_TAP123 0x0054
+#define HDA_COEFF_C_PH3_TAP456 0x0058
+#define HDA_COEFF_C_PH4_TAP123 0x005C
+#define HDA_COEFF_C_PH4_TAP456 0x0060
+#define HDA_SYNC_AWGI 0x0300
+
+/* HDA_ANA_CFG */
+#define CFG_AWG_ASYNC_EN BIT(0)
+#define CFG_AWG_ASYNC_HSYNC_MTD BIT(1)
+#define CFG_AWG_ASYNC_VSYNC_MTD BIT(2)
+#define CFG_AWG_SYNC_DEL BIT(3)
+#define CFG_AWG_FLTR_MODE_SHIFT 4
+#define CFG_AWG_FLTR_MODE_MASK (0xF << CFG_AWG_FLTR_MODE_SHIFT)
+#define CFG_AWG_FLTR_MODE_SD (0 << CFG_AWG_FLTR_MODE_SHIFT)
+#define CFG_AWG_FLTR_MODE_ED (1 << CFG_AWG_FLTR_MODE_SHIFT)
+#define CFG_AWG_FLTR_MODE_HD (2 << CFG_AWG_FLTR_MODE_SHIFT)
+#define CFG_SYNC_ON_PBPR_MASK BIT(8)
+#define CFG_PREFILTER_EN_MASK BIT(9)
+#define CFG_PBPR_SYNC_OFF_SHIFT 16
+#define CFG_PBPR_SYNC_OFF_MASK (0x7FF << CFG_PBPR_SYNC_OFF_SHIFT)
+#define CFG_PBPR_SYNC_OFF_VAL 0x117 /* voltage dependent, STiH416 */
+
+/* Default scaling values */
+#define SCALE_CTRL_Y_DFLT 0x00C50256
+#define SCALE_CTRL_CB_DFLT 0x00DB0249
+#define SCALE_CTRL_CR_DFLT 0x00DB0249
+
+/* Video DACs control */
+#define VIDEO_DACS_CONTROL_MASK 0x0FFF
+#define VIDEO_DACS_CONTROL_SYSCFG2535 0x085C /* for stih416 */
+#define DAC_CFG_HD_OFF_SHIFT 5
+#define DAC_CFG_HD_OFF_MASK (0x7 << DAC_CFG_HD_OFF_SHIFT)
+#define VIDEO_DACS_CONTROL_SYSCFG5072 0x0120 /* for stih407 */
+#define DAC_CFG_HD_HZUVW_OFF_MASK BIT(1)
+
+
+/* Upsampler values for the alternative 2X Filter */
+#define SAMPLER_COEF_NB 8
+#define HDA_ANA_SRC_Y_CFG_ALT_2X 0x01130000
+static u32 coef_y_alt_2x[] = {
+ 0x00FE83FB, 0x1F900401, 0x00000000, 0x00000000,
+ 0x00F408F9, 0x055F7C25, 0x00000000, 0x00000000
+};
+
+#define HDA_ANA_SRC_C_CFG_ALT_2X 0x01750004
+static u32 coef_c_alt_2x[] = {
+ 0x001305F7, 0x05274BD0, 0x00000000, 0x00000000,
+ 0x0004907C, 0x09C80B9D, 0x00000000, 0x00000000
+};
+
+/* Upsampler values for the 4X Filter */
+#define HDA_ANA_SRC_Y_CFG_4X 0x01ED0005
+#define HDA_ANA_SRC_C_CFG_4X 0x01ED0004
+static u32 coef_yc_4x[] = {
+ 0x00FC827F, 0x008FE20B, 0x00F684FC, 0x050F7C24,
+ 0x00F4857C, 0x0A1F402E, 0x00FA027F, 0x0E076E1D
+};
+
+/* AWG instructions for some video modes */
+#define AWG_MAX_INST 64
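+
+/*
+ * Each mode table below is written to the AWG instruction registers
+ * starting at HDA_SYNC_AWGI and zero-padded up to AWG_MAX_INST entries
+ * (see sti_hda_configure_awg()).
+ */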
+
+/* 720p@50 */
+static u32 AWGi_720p_50[] = {
+ 0x00000971, 0x00000C26, 0x0000013B, 0x00000CDA,
+ 0x00000104, 0x00000E7E, 0x00000E7F, 0x0000013B,
+ 0x00000D8E, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C26, 0x0000003B, 0x00000FB4, 0x00000FB5,
+ 0x00000104, 0x00001AE8
+};
+
+#define NN_720p_50 ARRAY_SIZE(AWGi_720p_50)
+
+/* 720p@60 */
+static u32 AWGi_720p_60[] = {
+ 0x00000971, 0x00000C26, 0x0000013B, 0x00000CDA,
+ 0x00000104, 0x00000E7E, 0x00000E7F, 0x0000013B,
+ 0x00000C44, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C26, 0x0000003B, 0x00000F0F, 0x00000F10,
+ 0x00000104, 0x00001AE8
+};
+
+#define NN_720p_60 ARRAY_SIZE(AWGi_720p_60)
+
+/* 1080p@30 */
+static u32 AWGi_1080p_30[] = {
+ 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
+ 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
+ 0x00000C2A, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C2A, 0x0000003B, 0x00000EBE, 0x00000EBF,
+ 0x00000EBF, 0x00000104, 0x00001A2F, 0x00001C4B,
+ 0x00001C52
+};
+
+#define NN_1080p_30 ARRAY_SIZE(AWGi_1080p_30)
+
+/* 1080p@25 */
+static u32 AWGi_1080p_25[] = {
+ 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
+ 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
+ 0x00000DE2, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C2A, 0x0000003B, 0x00000F51, 0x00000F51,
+ 0x00000F52, 0x00000104, 0x00001A2F, 0x00001C4B,
+ 0x00001C52
+};
+
+#define NN_1080p_25 ARRAY_SIZE(AWGi_1080p_25)
+
+/* 1080p@24 */
+static u32 AWGi_1080p_24[] = {
+ 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
+ 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
+ 0x00000E50, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C2A, 0x0000003B, 0x00000F76, 0x00000F76,
+ 0x00000F76, 0x00000104, 0x00001A2F, 0x00001C4B,
+ 0x00001C52
+};
+
+#define NN_1080p_24 ARRAY_SIZE(AWGi_1080p_24)
+
+/* 720x480p@60 */
+static u32 AWGi_720x480p_60[] = {
+ 0x00000904, 0x00000F18, 0x0000013B, 0x00001805,
+ 0x00000904, 0x00000C3D, 0x0000003B, 0x00001A06
+};
+
+#define NN_720x480p_60 ARRAY_SIZE(AWGi_720x480p_60)
+
+/* Video mode category */
+enum sti_hda_vid_cat {
+ VID_SD,
+ VID_ED,
+ VID_HD_74M,
+ VID_HD_148M
+};
+
+struct sti_hda_video_config {
+ struct drm_display_mode mode;
+ u32 *awg_instr;
+ int nb_instr;
+ enum sti_hda_vid_cat vid_cat;
+};
+
+/* HD analog supported modes
+ * Interlaced modes may be added when supported by the whole display chain
+ */
+static const struct sti_hda_video_config hda_supported_modes[] = {
+ /* 1080p30 74.250Mhz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_30, NN_1080p_30, VID_HD_74M},
+ /* 1080p30 74.176Mhz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74176, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_30, NN_1080p_30, VID_HD_74M},
+ /* 1080p24 74.250Mhz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_24, NN_1080p_24, VID_HD_74M},
+ /* 1080p24 74.176Mhz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74176, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_24, NN_1080p_24, VID_HD_74M},
+ /* 1080p25 74.250Mhz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_25, NN_1080p_25, VID_HD_74M},
+ /* 720p60 74.250Mhz */
+ {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_720p_60, NN_720p_60, VID_HD_74M},
+ /* 720p60 74.176Mhz */
+ {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74176, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_720p_60, NN_720p_60, VID_HD_74M},
+ /* 720p50 74.250Mhz */
+ {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_720p_50, NN_720p_50, VID_HD_74M},
+ /* 720x480p60 27.027Mhz */
+ {{DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27027, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ AWGi_720x480p_60, NN_720x480p_60, VID_ED},
+ /* 720x480p60 27.000Mhz */
+ {{DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ AWGi_720x480p_60, NN_720x480p_60, VID_ED}
+};
+
+/**
+ * STI hd analog structure
+ *
+ * @dev: driver device
+ * @drm_dev: pointer to drm device
+ * @mode: current display mode selected
+ * @regs: HD analog register
+ * @video_dacs_ctrl: video DACs control register
+ * @clk_pix: HD analog pixel clock
+ * @clk_hddac: HD analog DAC clock
+ * @enabled: true if HD analog is enabled, else false
+ */
+struct sti_hda {
+ struct device dev;
+ struct drm_device *drm_dev;
+ struct drm_display_mode mode;
+ void __iomem *regs;
+ void __iomem *video_dacs_ctrl;
+ struct clk *clk_pix;
+ struct clk *clk_hddac;
+ bool enabled;
+};
+
+struct sti_hda_connector {
+ struct drm_connector drm_connector;
+ struct drm_encoder *encoder;
+ struct sti_hda *hda;
+};
+
+#define to_sti_hda_connector(x) \
+ container_of(x, struct sti_hda_connector, drm_connector)
+
+static u32 hda_read(struct sti_hda *hda, int offset)
+{
+ return readl(hda->regs + offset);
+}
+
+static void hda_write(struct sti_hda *hda, u32 val, int offset)
+{
+ writel(val, hda->regs + offset);
+}
+
+/**
+ * Search for a video mode in the supported modes table
+ *
+ * @mode: mode being searched
+ * @idx: index of the found mode
+ *
+ * Return true if mode is found
+ */
+static bool hda_get_mode_idx(struct drm_display_mode mode, int *idx)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++)
+ if (drm_mode_equal(&hda_supported_modes[i].mode, &mode)) {
+ *idx = i;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * Enable the HD DACs
+ *
+ * @hda: pointer to HD analog structure
+ * @enable: true if the HD DACs must be enabled, else false
+ */
+static void hda_enable_hd_dacs(struct sti_hda *hda, bool enable)
+{
+ u32 mask;
+
+ if (hda->video_dacs_ctrl) {
+ u32 val;
+
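+		/* ioremap preserves the register offset within the page,
+		 * so the low bits of the mapped address identify which
+		 * syscfg video DACs control register is in use */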
+ switch ((u32)hda->video_dacs_ctrl & VIDEO_DACS_CONTROL_MASK) {
+ case VIDEO_DACS_CONTROL_SYSCFG2535:
+ mask = DAC_CFG_HD_OFF_MASK;
+ break;
+ case VIDEO_DACS_CONTROL_SYSCFG5072:
+ mask = DAC_CFG_HD_HZUVW_OFF_MASK;
+ break;
+ default:
+ DRM_INFO("Video DACS control register not supported!");
+ return;
+ }
+
+ val = readl(hda->video_dacs_ctrl);
+ if (enable)
+ val &= ~mask;
+ else
+ val |= mask;
+
+ writel(val, hda->video_dacs_ctrl);
+ }
+}
+
+/**
+ * Configure AWG, writing instructions
+ *
+ * @hda: pointer to HD analog structure
+ * @awg_instr: pointer to AWG instructions table
+ * @nb: number of AWG instructions
+ */
+static void sti_hda_configure_awg(struct sti_hda *hda, u32 *awg_instr, int nb)
+{
+ unsigned int i;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ for (i = 0; i < nb; i++)
+ hda_write(hda, awg_instr[i], HDA_SYNC_AWGI + i * 4);
+ for (i = nb; i < AWG_MAX_INST; i++)
+ hda_write(hda, 0, HDA_SYNC_AWGI + i * 4);
+}
+
+static void sti_hda_disable(struct drm_bridge *bridge)
+{
+ struct sti_hda *hda = bridge->driver_private;
+ u32 val;
+
+ if (!hda->enabled)
+ return;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Disable HD DAC and AWG */
+ val = hda_read(hda, HDA_ANA_CFG);
+ val &= ~CFG_AWG_ASYNC_EN;
+ hda_write(hda, val, HDA_ANA_CFG);
+ hda_write(hda, 0, HDA_ANA_ANC_CTRL);
+
+ hda_enable_hd_dacs(hda, false);
+
+ /* Disable/unprepare hda clock */
+ clk_disable_unprepare(hda->clk_hddac);
+ clk_disable_unprepare(hda->clk_pix);
+
+ hda->enabled = false;
+}
+
+static void sti_hda_pre_enable(struct drm_bridge *bridge)
+{
+ struct sti_hda *hda = bridge->driver_private;
+	u32 val, i;
+	int mode_idx;
+ u32 src_filter_y, src_filter_c;
+ u32 *coef_y, *coef_c;
+ u32 filter_mode;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (hda->enabled)
+ return;
+
+ /* Prepare/enable clocks */
+ if (clk_prepare_enable(hda->clk_pix))
+ DRM_ERROR("Failed to prepare/enable hda_pix clk\n");
+ if (clk_prepare_enable(hda->clk_hddac))
+ DRM_ERROR("Failed to prepare/enable hda_hddac clk\n");
+
+ if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
+ DRM_ERROR("Undefined mode\n");
+ return;
+ }
+
+ switch (hda_supported_modes[mode_idx].vid_cat) {
+ case VID_HD_148M:
+ DRM_ERROR("Beyond HD analog capabilities\n");
+ return;
+ case VID_HD_74M:
+		/* HD uses the alternate 2x filter */
+ filter_mode = CFG_AWG_FLTR_MODE_HD;
+ src_filter_y = HDA_ANA_SRC_Y_CFG_ALT_2X;
+ src_filter_c = HDA_ANA_SRC_C_CFG_ALT_2X;
+ coef_y = coef_y_alt_2x;
+ coef_c = coef_c_alt_2x;
+ break;
+ case VID_ED:
+ /* ED uses 4x filter */
+ filter_mode = CFG_AWG_FLTR_MODE_ED;
+ src_filter_y = HDA_ANA_SRC_Y_CFG_4X;
+ src_filter_c = HDA_ANA_SRC_C_CFG_4X;
+ coef_y = coef_yc_4x;
+ coef_c = coef_yc_4x;
+ break;
+ case VID_SD:
+ DRM_ERROR("Not supported\n");
+ return;
+ default:
+ DRM_ERROR("Undefined resolution\n");
+ return;
+ }
+ DRM_DEBUG_DRIVER("Using HDA mode #%d\n", mode_idx);
+
+ /* Enable HD Video DACs */
+ hda_enable_hd_dacs(hda, true);
+
+ /* Configure scaler */
+ hda_write(hda, SCALE_CTRL_Y_DFLT, HDA_ANA_SCALE_CTRL_Y);
+ hda_write(hda, SCALE_CTRL_CB_DFLT, HDA_ANA_SCALE_CTRL_CB);
+ hda_write(hda, SCALE_CTRL_CR_DFLT, HDA_ANA_SCALE_CTRL_CR);
+
+ /* Configure sampler */
+	hda_write(hda, src_filter_y, HDA_ANA_SRC_Y_CFG);
+ hda_write(hda, src_filter_c, HDA_ANA_SRC_C_CFG);
+ for (i = 0; i < SAMPLER_COEF_NB; i++) {
+ hda_write(hda, coef_y[i], HDA_COEFF_Y_PH1_TAP123 + i * 4);
+ hda_write(hda, coef_c[i], HDA_COEFF_C_PH1_TAP123 + i * 4);
+ }
+
+ /* Configure main HDFormatter */
+ val = 0;
+ val |= (hda->mode.flags & DRM_MODE_FLAG_INTERLACE) ?
+ 0 : CFG_AWG_ASYNC_VSYNC_MTD;
+ val |= (CFG_PBPR_SYNC_OFF_VAL << CFG_PBPR_SYNC_OFF_SHIFT);
+ val |= filter_mode;
+ hda_write(hda, val, HDA_ANA_CFG);
+
+ /* Configure AWG */
+ sti_hda_configure_awg(hda, hda_supported_modes[mode_idx].awg_instr,
+ hda_supported_modes[mode_idx].nb_instr);
+
+ /* Enable AWG */
+ val = hda_read(hda, HDA_ANA_CFG);
+ val |= CFG_AWG_ASYNC_EN;
+ hda_write(hda, val, HDA_ANA_CFG);
+
+ hda->enabled = true;
+}
+
+static void sti_hda_set_mode(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sti_hda *hda = bridge->driver_private;
+	int mode_idx;
+ int hddac_rate;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ memcpy(&hda->mode, mode, sizeof(struct drm_display_mode));
+
+ if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
+ DRM_ERROR("Undefined mode\n");
+ return;
+ }
+
+ switch (hda_supported_modes[mode_idx].vid_cat) {
+ case VID_HD_74M:
+		/* HD uses the alternate 2x filter */
+ hddac_rate = mode->clock * 1000 * 2;
+ break;
+ case VID_ED:
+ /* ED uses 4x filter */
+ hddac_rate = mode->clock * 1000 * 4;
+ break;
+ default:
+ DRM_ERROR("Undefined mode\n");
+ return;
+ }
+
+ /* HD DAC = 148.5Mhz or 108 Mhz */
+ ret = clk_set_rate(hda->clk_hddac, hddac_rate);
+ if (ret < 0)
+ DRM_ERROR("Cannot set rate (%dHz) for hda_hddac clk\n",
+ hddac_rate);
+
+ /* HDformatter clock = compositor clock */
+ ret = clk_set_rate(hda->clk_pix, mode->clock * 1000);
+ if (ret < 0)
+ DRM_ERROR("Cannot set rate (%dHz) for hda_pix clk\n",
+ mode->clock * 1000);
+}
+
+static void sti_hda_bridge_nope(struct drm_bridge *bridge)
+{
+ /* do nothing */
+}
+
+static void sti_hda_bridge_destroy(struct drm_bridge *bridge)
+{
+ drm_bridge_cleanup(bridge);
+ kfree(bridge);
+}
+
+static const struct drm_bridge_funcs sti_hda_bridge_funcs = {
+ .pre_enable = sti_hda_pre_enable,
+ .enable = sti_hda_bridge_nope,
+ .disable = sti_hda_disable,
+ .post_disable = sti_hda_bridge_nope,
+ .mode_set = sti_hda_set_mode,
+	.destroy = sti_hda_bridge_destroy,
+};
+
+static int sti_hda_connector_get_modes(struct drm_connector *connector)
+{
+ unsigned int i;
+ int count = 0;
+ struct sti_hda_connector *hda_connector
+ = to_sti_hda_connector(connector);
+ struct sti_hda *hda = hda_connector->hda;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++) {
+ struct drm_display_mode *mode =
+ drm_mode_duplicate(hda->drm_dev,
+ &hda_supported_modes[i].mode);
+ if (!mode)
+ continue;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ /* the first mode is the preferred mode */
+ if (i == 0)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ count++;
+ }
+
+ drm_mode_sort(&connector->modes);
+
+ return count;
+}
+
+#define CLK_TOLERANCE_HZ 50
+
+static int sti_hda_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int target = mode->clock * 1000;
+ int target_min = target - CLK_TOLERANCE_HZ;
+ int target_max = target + CLK_TOLERANCE_HZ;
+ int result;
+ int idx;
+ struct sti_hda_connector *hda_connector
+ = to_sti_hda_connector(connector);
+ struct sti_hda *hda = hda_connector->hda;
+
+ if (!hda_get_mode_idx(*mode, &idx)) {
+ return MODE_BAD;
+ } else {
+ result = clk_round_rate(hda->clk_pix, target);
+
+ DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n",
+ target, result);
+
+ if ((result < target_min) || (result > target_max)) {
+ DRM_DEBUG_DRIVER("hda pixclk=%d not supported\n",
+ target);
+ return MODE_BAD;
+ }
+ }
+
+ return MODE_OK;
+}
+
+struct drm_encoder *sti_hda_best_encoder(struct drm_connector *connector)
+{
+ struct sti_hda_connector *hda_connector
+ = to_sti_hda_connector(connector);
+
+ /* Best encoder is the one associated during connector creation */
+ return hda_connector->encoder;
+}
+
+static struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = {
+ .get_modes = sti_hda_connector_get_modes,
+ .mode_valid = sti_hda_connector_mode_valid,
+ .best_encoder = sti_hda_best_encoder,
+};
+
+static enum drm_connector_status
+sti_hda_connector_detect(struct drm_connector *connector, bool force)
+{
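+	/* the analog output provides no hot-plug detection, so the
+	 * connector is always reported as connected */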
+ return connector_status_connected;
+}
+
+static void sti_hda_connector_destroy(struct drm_connector *connector)
+{
+ struct sti_hda_connector *hda_connector
+ = to_sti_hda_connector(connector);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(hda_connector);
+}
+
+static struct drm_connector_funcs sti_hda_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = sti_hda_connector_detect,
+ .destroy = sti_hda_connector_destroy,
+};
+
+static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
+ return encoder;
+ }
+
+ return NULL;
+}
+
+static int sti_hda_bind(struct device *dev, struct device *master, void *data)
+{
+ struct sti_hda *hda = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct drm_encoder *encoder;
+ struct sti_hda_connector *connector;
+ struct drm_connector *drm_connector;
+ struct drm_bridge *bridge;
+ int err;
+
+ /* Set the drm device handle */
+ hda->drm_dev = drm_dev;
+
+ encoder = sti_hda_find_encoder(drm_dev);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return -ENOMEM;
+
+ connector->hda = hda;
+
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver_private = hda;
+ drm_bridge_init(drm_dev, bridge, &sti_hda_bridge_funcs);
+
+ encoder->bridge = bridge;
+ connector->encoder = encoder;
+
+ drm_connector = (struct drm_connector *)connector;
+
+ drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_init(drm_dev, drm_connector,
+ &sti_hda_connector_funcs, DRM_MODE_CONNECTOR_Component);
+ drm_connector_helper_add(drm_connector,
+ &sti_hda_connector_helper_funcs);
+
+ err = drm_connector_register(drm_connector);
+ if (err)
+ goto err_connector;
+
+ err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ if (err) {
+ DRM_ERROR("Failed to attach a connector to a encoder\n");
+ goto err_sysfs;
+ }
+
+ return 0;
+
+err_sysfs:
+ drm_connector_unregister(drm_connector);
+err_connector:
+ drm_bridge_cleanup(bridge);
+ drm_connector_cleanup(drm_connector);
+ return -EINVAL;
+}
+
+static void sti_hda_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_hda_ops = {
+ .bind = sti_hda_bind,
+ .unbind = sti_hda_unbind,
+};
+
+static int sti_hda_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sti_hda *hda;
+ struct resource *res;
+
+ DRM_INFO("%s\n", __func__);
+
+ hda = devm_kzalloc(dev, sizeof(*hda), GFP_KERNEL);
+ if (!hda)
+ return -ENOMEM;
+
+ hda->dev = pdev->dev;
+
+ /* Get resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hda-reg");
+ if (!res) {
+ DRM_ERROR("Invalid hda resource\n");
+ return -ENOMEM;
+ }
+ hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+	if (!hda->regs)
+		return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "video-dacs-ctrl");
+ if (res) {
+ hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,
+ resource_size(res));
+		if (!hda->video_dacs_ctrl)
+			return -ENOMEM;
+ } else {
+		/* If the video-dacs-ctrl resource does not exist, continue probing */
+ DRM_DEBUG_DRIVER("No video-dacs-ctrl resource\n");
+ hda->video_dacs_ctrl = NULL;
+ }
+
+ /* Get clock resources */
+ hda->clk_pix = devm_clk_get(dev, "pix");
+ if (IS_ERR(hda->clk_pix)) {
+ DRM_ERROR("Cannot get hda_pix clock\n");
+ return PTR_ERR(hda->clk_pix);
+ }
+
+ hda->clk_hddac = devm_clk_get(dev, "hddac");
+ if (IS_ERR(hda->clk_hddac)) {
+ DRM_ERROR("Cannot get hda_hddac clock\n");
+ return PTR_ERR(hda->clk_hddac);
+ }
+
+ platform_set_drvdata(pdev, hda);
+
+ return component_add(&pdev->dev, &sti_hda_ops);
+}
+
+static int sti_hda_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sti_hda_ops);
+ return 0;
+}
+
+static struct of_device_id hda_of_match[] = {
+ { .compatible = "st,stih416-hda", },
+ { .compatible = "st,stih407-hda", },
+ { /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, hda_of_match);
+
+struct platform_driver sti_hda_driver = {
+ .driver = {
+ .name = "sti-hda",
+ .owner = THIS_MODULE,
+ .of_match_table = hda_of_match,
+ },
+ .probe = sti_hda_probe,
+ .remove = sti_hda_remove,
+};
+
+module_platform_driver(sti_hda_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
new file mode 100644
index 000000000000..284e541d970d
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -0,0 +1,810 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/hdmi.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+#include "sti_hdmi.h"
+#include "sti_hdmi_tx3g4c28phy.h"
+#include "sti_hdmi_tx3g0c55phy.h"
+#include "sti_vtg.h"
+
+#define HDMI_CFG 0x0000
+#define HDMI_INT_EN 0x0004
+#define HDMI_INT_STA 0x0008
+#define HDMI_INT_CLR 0x000C
+#define HDMI_STA 0x0010
+#define HDMI_ACTIVE_VID_XMIN 0x0100
+#define HDMI_ACTIVE_VID_XMAX 0x0104
+#define HDMI_ACTIVE_VID_YMIN 0x0108
+#define HDMI_ACTIVE_VID_YMAX 0x010C
+#define HDMI_DFLT_CHL0_DAT 0x0110
+#define HDMI_DFLT_CHL1_DAT 0x0114
+#define HDMI_DFLT_CHL2_DAT 0x0118
+#define HDMI_SW_DI_1_HEAD_WORD 0x0210
+#define HDMI_SW_DI_1_PKT_WORD0 0x0214
+#define HDMI_SW_DI_1_PKT_WORD1 0x0218
+#define HDMI_SW_DI_1_PKT_WORD2 0x021C
+#define HDMI_SW_DI_1_PKT_WORD3 0x0220
+#define HDMI_SW_DI_1_PKT_WORD4 0x0224
+#define HDMI_SW_DI_1_PKT_WORD5 0x0228
+#define HDMI_SW_DI_1_PKT_WORD6 0x022C
+#define HDMI_SW_DI_CFG 0x0230
+
+#define HDMI_IFRAME_SLOT_AVI 1
+
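+/* Build the per-slot register names by token pasting, e.g.
+ * HDMI_SW_DI_N_HEAD_WORD(1) expands to HDMI_SW_DI_1_HEAD_WORD */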
+#define XCAT(prefix, x, suffix) prefix ## x ## suffix
+#define HDMI_SW_DI_N_HEAD_WORD(x) XCAT(HDMI_SW_DI_, x, _HEAD_WORD)
+#define HDMI_SW_DI_N_PKT_WORD0(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD0)
+#define HDMI_SW_DI_N_PKT_WORD1(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD1)
+#define HDMI_SW_DI_N_PKT_WORD2(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD2)
+#define HDMI_SW_DI_N_PKT_WORD3(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD3)
+#define HDMI_SW_DI_N_PKT_WORD4(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD4)
+#define HDMI_SW_DI_N_PKT_WORD5(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD5)
+#define HDMI_SW_DI_N_PKT_WORD6(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD6)
+
+#define HDMI_IFRAME_DISABLED 0x0
+#define HDMI_IFRAME_SINGLE_SHOT 0x1
+#define HDMI_IFRAME_FIELD 0x2
+#define HDMI_IFRAME_FRAME 0x3
+#define HDMI_IFRAME_MASK 0x3
+#define HDMI_IFRAME_CFG_DI_N(x, n) ((x) << ((n-1)*4)) /* n from 1 to 6 */
+
+#define HDMI_CFG_DEVICE_EN BIT(0)
+#define HDMI_CFG_HDMI_NOT_DVI BIT(1)
+#define HDMI_CFG_HDCP_EN BIT(2)
+#define HDMI_CFG_ESS_NOT_OESS BIT(3)
+#define HDMI_CFG_H_SYNC_POL_NEG BIT(4)
+#define HDMI_CFG_SINK_TERM_DET_EN BIT(5)
+#define HDMI_CFG_V_SYNC_POL_NEG BIT(6)
+#define HDMI_CFG_422_EN BIT(8)
+#define HDMI_CFG_FIFO_OVERRUN_CLR BIT(12)
+#define HDMI_CFG_FIFO_UNDERRUN_CLR BIT(13)
+#define HDMI_CFG_SW_RST_EN BIT(31)
+
+#define HDMI_INT_GLOBAL BIT(0)
+#define HDMI_INT_SW_RST BIT(1)
+#define HDMI_INT_PIX_CAP BIT(3)
+#define HDMI_INT_HOT_PLUG BIT(4)
+#define HDMI_INT_DLL_LCK BIT(5)
+#define HDMI_INT_NEW_FRAME BIT(6)
+#define HDMI_INT_GENCTRL_PKT BIT(7)
+#define HDMI_INT_SINK_TERM_PRESENT BIT(11)
+
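+/*
+ * HDMI_DEFAULT_INT is the interrupt set enabled at bind time (sink term,
+ * hot plug and PLL lock detection); HDMI_WORKING_INT adds the events
+ * needed while the output is active (see sti_hdmi_pre_enable()).
+ */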
+#define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \
+ | HDMI_INT_DLL_LCK \
+ | HDMI_INT_HOT_PLUG \
+ | HDMI_INT_GLOBAL)
+
+#define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \
+ | HDMI_INT_GENCTRL_PKT \
+ | HDMI_INT_NEW_FRAME \
+ | HDMI_INT_DLL_LCK \
+ | HDMI_INT_HOT_PLUG \
+ | HDMI_INT_PIX_CAP \
+ | HDMI_INT_SW_RST \
+ | HDMI_INT_GLOBAL)
+
+#define HDMI_STA_SW_RST BIT(1)
+
+struct sti_hdmi_connector {
+ struct drm_connector drm_connector;
+ struct drm_encoder *encoder;
+ struct sti_hdmi *hdmi;
+};
+
+#define to_sti_hdmi_connector(x) \
+ container_of(x, struct sti_hdmi_connector, drm_connector)
+
+u32 hdmi_read(struct sti_hdmi *hdmi, int offset)
+{
+ return readl(hdmi->regs + offset);
+}
+
+void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset)
+{
+ writel(val, hdmi->regs + offset);
+}
+
+/**
+ * Threaded HDMI interrupt handler
+ *
+ * @irq: irq number
+ * @arg: pointer to the sti_hdmi structure
+ */
+static irqreturn_t hdmi_irq_thread(int irq, void *arg)
+{
+ struct sti_hdmi *hdmi = arg;
+
+ /* Hot plug/unplug IRQ */
+ if (hdmi->irq_status & HDMI_INT_HOT_PLUG) {
+ /* read gpio to get the status */
+ hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
+ if (hdmi->drm_dev)
+ drm_helper_hpd_irq_event(hdmi->drm_dev);
+ }
+
+ /* Sw reset and PLL lock are exclusive so we can use the same
+ * event to signal them
+ */
+ if (hdmi->irq_status & (HDMI_INT_SW_RST | HDMI_INT_DLL_LCK)) {
+ hdmi->event_received = true;
+ wake_up_interruptible(&hdmi->wait_event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * HDMI interrupt handler
+ *
+ * @irq: irq number
+ * @arg: pointer to the sti_hdmi structure
+ */
+static irqreturn_t hdmi_irq(int irq, void *arg)
+{
+ struct sti_hdmi *hdmi = arg;
+
+ /* read interrupt status */
+ hdmi->irq_status = hdmi_read(hdmi, HDMI_INT_STA);
+
+ /* clear interrupt status */
+ hdmi_write(hdmi, hdmi->irq_status, HDMI_INT_CLR);
+
+ /* force sync bus write */
+ hdmi_read(hdmi, HDMI_INT_STA);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * Set hdmi active area depending on the drm display mode selected
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+static void hdmi_active_area(struct sti_hdmi *hdmi)
+{
+ u32 xmin, xmax;
+ u32 ymin, ymax;
+
+ xmin = sti_vtg_get_pixel_number(hdmi->mode, 0);
+ xmax = sti_vtg_get_pixel_number(hdmi->mode, hdmi->mode.hdisplay - 1);
+ ymin = sti_vtg_get_line_number(hdmi->mode, 0);
+ ymax = sti_vtg_get_line_number(hdmi->mode, hdmi->mode.vdisplay - 1);
+
+ hdmi_write(hdmi, xmin, HDMI_ACTIVE_VID_XMIN);
+ hdmi_write(hdmi, xmax, HDMI_ACTIVE_VID_XMAX);
+ hdmi_write(hdmi, ymin, HDMI_ACTIVE_VID_YMIN);
+ hdmi_write(hdmi, ymax, HDMI_ACTIVE_VID_YMAX);
+}
+
+/**
+ * Overall hdmi configuration
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+static void hdmi_config(struct sti_hdmi *hdmi)
+{
+ u32 conf;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Clear overrun and underrun fifo */
+ conf = HDMI_CFG_FIFO_OVERRUN_CLR | HDMI_CFG_FIFO_UNDERRUN_CLR;
+
+ /* Enable HDMI mode not DVI */
+ conf |= HDMI_CFG_HDMI_NOT_DVI | HDMI_CFG_ESS_NOT_OESS;
+
+ /* Enable sink term detection */
+ conf |= HDMI_CFG_SINK_TERM_DET_EN;
+
+ /* Set Hsync polarity */
+ if (hdmi->mode.flags & DRM_MODE_FLAG_NHSYNC) {
+ DRM_DEBUG_DRIVER("H Sync Negative\n");
+ conf |= HDMI_CFG_H_SYNC_POL_NEG;
+ }
+
+ /* Set Vsync polarity */
+ if (hdmi->mode.flags & DRM_MODE_FLAG_NVSYNC) {
+ DRM_DEBUG_DRIVER("V Sync Negative\n");
+ conf |= HDMI_CFG_V_SYNC_POL_NEG;
+ }
+
+ /* Enable HDMI */
+ conf |= HDMI_CFG_DEVICE_EN;
+
+ hdmi_write(hdmi, conf, HDMI_CFG);
+}
+
+/**
+ * Prepare and configure the AVI infoframe
+ *
+ * AVI infoframes are transmitted at least once per two video fields and
+ * contain information about the HDMI transmission mode, such as color
+ * space and colorimetry.
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return a negative value if an error occurs
+ */
+static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
+{
+ struct drm_display_mode *mode = &hdmi->mode;
+ struct hdmi_avi_infoframe infoframe;
+ u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+ u8 *frame = buffer + HDMI_INFOFRAME_HEADER_SIZE;
+ u32 val;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe, mode);
+ if (ret < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %d\n", ret);
+ return ret;
+ }
+
+ /* fixed infoframe configuration not linked to the mode */
+ infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ infoframe.colorimetry = HDMI_COLORIMETRY_NONE;
+
+ ret = hdmi_avi_infoframe_pack(&infoframe, buffer, sizeof(buffer));
+ if (ret < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %d\n", ret);
+ return ret;
+ }
+
+ /* Disable transmission slot for AVI infoframe */
+ val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+ val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, HDMI_IFRAME_SLOT_AVI);
+ hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
+
+ /* Infoframe header */
+ val = buffer[0x0];
+ val |= buffer[0x1] << 8;
+ val |= buffer[0x2] << 16;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI));
+
+ /* Infoframe packet bytes */
+ val = frame[0x0];
+ val |= frame[0x1] << 8;
+ val |= frame[0x2] << 16;
+ val |= frame[0x3] << 24;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI));
+
+ val = frame[0x4];
+ val |= frame[0x5] << 8;
+ val |= frame[0x6] << 16;
+ val |= frame[0x7] << 24;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI));
+
+ val = frame[0x8];
+ val |= frame[0x9] << 8;
+ val |= frame[0xA] << 16;
+ val |= frame[0xB] << 24;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
+
+ val = frame[0xC];
+ val |= frame[0xD] << 8;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
+
+ /* Enable transmission slot for AVI infoframe
+	 * According to the HDMI specification, AVI infoframes should be
+	 * transmitted at least once per two video fields
+ */
+ val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+ val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, HDMI_IFRAME_SLOT_AVI);
+ hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
+
+ return 0;
+}
+
+/**
+ * Software reset of the hdmi subsystem
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ */
+#define HDMI_TIMEOUT_SWRESET 100 /* milliseconds */
+static void hdmi_swreset(struct sti_hdmi *hdmi)
+{
+ u32 val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Enable hdmi_audio clock only during hdmi reset */
+ if (clk_prepare_enable(hdmi->clk_audio))
+ DRM_INFO("Failed to prepare/enable hdmi_audio clk\n");
+
+ /* Sw reset */
+ hdmi->event_received = false;
+
+ val = hdmi_read(hdmi, HDMI_CFG);
+ val |= HDMI_CFG_SW_RST_EN;
+ hdmi_write(hdmi, val, HDMI_CFG);
+
+	/* Wait for the reset to complete */
+	wait_event_interruptible_timeout(hdmi->wait_event,
+					 hdmi->event_received,
+ msecs_to_jiffies
+ (HDMI_TIMEOUT_SWRESET));
+
+ /*
+ * HDMI_STA_SW_RST bit is set to '1' when SW_RST bit in HDMI_CFG is
+ * set to '1' and clk_audio is running.
+ */
+ if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_SW_RST) == 0)
+ DRM_DEBUG_DRIVER("Warning: HDMI sw reset timeout occurs\n");
+
+ val = hdmi_read(hdmi, HDMI_CFG);
+ val &= ~HDMI_CFG_SW_RST_EN;
+ hdmi_write(hdmi, val, HDMI_CFG);
+
+	/* Disable hdmi_audio clock: no longer needed for DRM purposes */
+ clk_disable_unprepare(hdmi->clk_audio);
+}
+
+static void sti_hdmi_disable(struct drm_bridge *bridge)
+{
+ struct sti_hdmi *hdmi = bridge->driver_private;
+
+ u32 val = hdmi_read(hdmi, HDMI_CFG);
+
+ if (!hdmi->enabled)
+ return;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Disable HDMI */
+ val &= ~HDMI_CFG_DEVICE_EN;
+ hdmi_write(hdmi, val, HDMI_CFG);
+
+ hdmi_write(hdmi, 0xffffffff, HDMI_INT_CLR);
+
+ /* Stop the phy */
+ hdmi->phy_ops->stop(hdmi);
+
+ /* Set the default channel data to be a dark red */
+ hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL0_DAT);
+ hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL1_DAT);
+ hdmi_write(hdmi, 0x0060, HDMI_DFLT_CHL2_DAT);
+
+ /* Disable/unprepare hdmi clock */
+ clk_disable_unprepare(hdmi->clk_phy);
+ clk_disable_unprepare(hdmi->clk_tmds);
+ clk_disable_unprepare(hdmi->clk_pix);
+
+ hdmi->enabled = false;
+}
+
+static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
+{
+ struct sti_hdmi *hdmi = bridge->driver_private;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (hdmi->enabled)
+ return;
+
+ /* Prepare/enable clocks */
+ if (clk_prepare_enable(hdmi->clk_pix))
+ DRM_ERROR("Failed to prepare/enable hdmi_pix clk\n");
+ if (clk_prepare_enable(hdmi->clk_tmds))
+ DRM_ERROR("Failed to prepare/enable hdmi_tmds clk\n");
+ if (clk_prepare_enable(hdmi->clk_phy))
+ DRM_ERROR("Failed to prepare/enable hdmi_rejec_pll clk\n");
+
+ hdmi->enabled = true;
+
+ /* Program hdmi serializer and start phy */
+ if (!hdmi->phy_ops->start(hdmi)) {
+ DRM_ERROR("Unable to start hdmi phy\n");
+ return;
+ }
+
+ /* Program hdmi active area */
+ hdmi_active_area(hdmi);
+
+ /* Enable working interrupts */
+ hdmi_write(hdmi, HDMI_WORKING_INT, HDMI_INT_EN);
+
+ /* Program hdmi config */
+ hdmi_config(hdmi);
+
+ /* Program AVI infoframe */
+ if (hdmi_avi_infoframe_config(hdmi))
+ DRM_ERROR("Unable to configure AVI infoframe\n");
+
+ /* Sw reset */
+ hdmi_swreset(hdmi);
+}
+
+static void sti_hdmi_set_mode(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sti_hdmi *hdmi = bridge->driver_private;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+	/* Copy the drm display mode into the hdmi local structure */
+ memcpy(&hdmi->mode, mode, sizeof(struct drm_display_mode));
+
+ /* Update clock framerate according to the selected mode */
+ ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000);
+ if (ret < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for hdmi_pix clk\n",
+ mode->clock * 1000);
+ return;
+ }
+ ret = clk_set_rate(hdmi->clk_phy, mode->clock * 1000);
+ if (ret < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for hdmi_rejection_pll clk\n",
+ mode->clock * 1000);
+ return;
+ }
+}
+
+static void sti_hdmi_bridge_nope(struct drm_bridge *bridge)
+{
+ /* do nothing */
+}
+
+static void sti_hdmi_bridge_destroy(struct drm_bridge *bridge)
+{
+ drm_bridge_cleanup(bridge);
+ kfree(bridge);
+}
+
+static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = {
+ .pre_enable = sti_hdmi_pre_enable,
+ .enable = sti_hdmi_bridge_nope,
+ .disable = sti_hdmi_disable,
+ .post_disable = sti_hdmi_bridge_nope,
+ .mode_set = sti_hdmi_set_mode,
+	.destroy = sti_hdmi_bridge_destroy,
+};
+
+static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct i2c_adapter *i2c_adap;
+ struct edid *edid;
+ int count;
+
+ DRM_DEBUG_DRIVER("\n");
+
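+	/* EDID is read over DDC, assumed to be wired to I2C adapter 1 */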
+ i2c_adap = i2c_get_adapter(1);
+ if (!i2c_adap)
+ goto fail;
+
+ edid = drm_get_edid(connector, i2c_adap);
+ if (!edid)
+ goto fail;
+
+ count = drm_add_edid_modes(connector, edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ kfree(edid);
+ return count;
+
+fail:
+ DRM_ERROR("Can not read HDMI EDID\n");
+ return 0;
+}
+
+#define CLK_TOLERANCE_HZ 50
+
+static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int target = mode->clock * 1000;
+ int target_min = target - CLK_TOLERANCE_HZ;
+ int target_max = target + CLK_TOLERANCE_HZ;
+ int result;
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+
+ result = clk_round_rate(hdmi->clk_pix, target);
+
+ DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n",
+ target, result);
+
+ if ((result < target_min) || (result > target_max)) {
+ DRM_DEBUG_DRIVER("hdmi pixclk=%d not supported\n", target);
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
+struct drm_encoder *sti_hdmi_best_encoder(struct drm_connector *connector)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+
+ /* Best encoder is the one associated during connector creation */
+ return hdmi_connector->encoder;
+}
+
+static struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = {
+ .get_modes = sti_hdmi_connector_get_modes,
+ .mode_valid = sti_hdmi_connector_mode_valid,
+ .best_encoder = sti_hdmi_best_encoder,
+};
+
+/* get detection status of display device */
+static enum drm_connector_status
+sti_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (hdmi->hpd) {
+ DRM_DEBUG_DRIVER("hdmi cable connected\n");
+ return connector_status_connected;
+ }
+
+ DRM_DEBUG_DRIVER("hdmi cable disconnected\n");
+ return connector_status_disconnected;
+}
+
+static void sti_hdmi_connector_destroy(struct drm_connector *connector)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(hdmi_connector);
+}
+
+static struct drm_connector_funcs sti_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = sti_hdmi_connector_detect,
+ .destroy = sti_hdmi_connector_destroy,
+};
+
+static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
+ return encoder;
+ }
+
+ return NULL;
+}
+
+static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct drm_encoder *encoder;
+ struct sti_hdmi_connector *connector;
+ struct drm_connector *drm_connector;
+ struct drm_bridge *bridge;
+ struct i2c_adapter *i2c_adap;
+ int err;
+
+ i2c_adap = i2c_get_adapter(1);
+ if (!i2c_adap)
+ return -EPROBE_DEFER;
+
+ /* Set the drm device handle */
+ hdmi->drm_dev = drm_dev;
+
+ encoder = sti_hdmi_find_encoder(drm_dev);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return -ENOMEM;
+
+ connector->hdmi = hdmi;
+
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver_private = hdmi;
+ drm_bridge_init(drm_dev, bridge, &sti_hdmi_bridge_funcs);
+
+ encoder->bridge = bridge;
+ connector->encoder = encoder;
+
+ drm_connector = (struct drm_connector *)connector;
+
+ drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_init(drm_dev, drm_connector,
+ &sti_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(drm_connector,
+ &sti_hdmi_connector_helper_funcs);
+
+ err = drm_connector_register(drm_connector);
+ if (err)
+ goto err_connector;
+
+ err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ if (err) {
+ DRM_ERROR("Failed to attach a connector to a encoder\n");
+ goto err_sysfs;
+ }
+
+ /* Enable default interrupts */
+ hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
+
+ return 0;
+
+err_sysfs:
+ drm_connector_unregister(drm_connector);
+err_connector:
+ drm_bridge_cleanup(bridge);
+ drm_connector_cleanup(drm_connector);
+ return -EINVAL;
+}
+
+static void sti_hdmi_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_hdmi_ops = {
+ .bind = sti_hdmi_bind,
+ .unbind = sti_hdmi_unbind,
+};
+
+static struct of_device_id hdmi_of_match[] = {
+ {
+ .compatible = "st,stih416-hdmi",
+ .data = &tx3g0c55phy_ops,
+ }, {
+ .compatible = "st,stih407-hdmi",
+ .data = &tx3g4c28phy_ops,
+ }, {
+ /* end node */
+ }
+};
+MODULE_DEVICE_TABLE(of, hdmi_of_match);
+
+static int sti_hdmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sti_hdmi *hdmi;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ int ret;
+
+ DRM_INFO("%s\n", __func__);
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = pdev->dev;
+
+ /* Get resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi-reg");
+ if (!res) {
+ DRM_ERROR("Invalid hdmi resource\n");
+ return -ENOMEM;
+ }
+ hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+	if (!hdmi->regs)
+		return -ENOMEM;
+
+ if (of_device_is_compatible(np, "st,stih416-hdmi")) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "syscfg");
+ if (!res) {
+ DRM_ERROR("Invalid syscfg resource\n");
+ return -ENOMEM;
+ }
+ hdmi->syscfg = devm_ioremap_nocache(dev, res->start,
+ resource_size(res));
+		if (!hdmi->syscfg)
+			return -ENOMEM;
+
+ }
+
+ hdmi->phy_ops = (struct hdmi_phy_ops *)
+ of_match_node(hdmi_of_match, np)->data;
+
+ /* Get clock resources */
+ hdmi->clk_pix = devm_clk_get(dev, "pix");
+ if (IS_ERR(hdmi->clk_pix)) {
+ DRM_ERROR("Cannot get hdmi_pix clock\n");
+ return PTR_ERR(hdmi->clk_pix);
+ }
+
+ hdmi->clk_tmds = devm_clk_get(dev, "tmds");
+ if (IS_ERR(hdmi->clk_tmds)) {
+ DRM_ERROR("Cannot get hdmi_tmds clock\n");
+ return PTR_ERR(hdmi->clk_tmds);
+ }
+
+ hdmi->clk_phy = devm_clk_get(dev, "phy");
+ if (IS_ERR(hdmi->clk_phy)) {
+ DRM_ERROR("Cannot get hdmi_phy clock\n");
+ return PTR_ERR(hdmi->clk_phy);
+ }
+
+ hdmi->clk_audio = devm_clk_get(dev, "audio");
+ if (IS_ERR(hdmi->clk_audio)) {
+ DRM_ERROR("Cannot get hdmi_audio clock\n");
+ return PTR_ERR(hdmi->clk_audio);
+ }
+
+ hdmi->hpd_gpio = of_get_named_gpio(np, "hdmi,hpd-gpio", 0);
+ if (hdmi->hpd_gpio < 0) {
+ DRM_ERROR("Failed to get hdmi hpd-gpio\n");
+ return -EIO;
+ }
+
+ hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
+
+ init_waitqueue_head(&hdmi->wait_event);
+
+ hdmi->irq = platform_get_irq_byname(pdev, "irq");
+
+ ret = devm_request_threaded_irq(dev, hdmi->irq, hdmi_irq,
+ hdmi_irq_thread, IRQF_ONESHOT, dev_name(dev), hdmi);
+ if (ret) {
+ DRM_ERROR("Failed to register HDMI interrupt\n");
+ return ret;
+ }
+
+ hdmi->reset = devm_reset_control_get(dev, "hdmi");
+ /* Take hdmi out of reset */
+ if (!IS_ERR(hdmi->reset))
+ reset_control_deassert(hdmi->reset);
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return component_add(&pdev->dev, &sti_hdmi_ops);
+}
+
+static int sti_hdmi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sti_hdmi_ops);
+ return 0;
+}
+
+struct platform_driver sti_hdmi_driver = {
+ .driver = {
+ .name = "sti-hdmi",
+ .owner = THIS_MODULE,
+ .of_match_table = hdmi_of_match,
+ },
+ .probe = sti_hdmi_probe,
+ .remove = sti_hdmi_remove,
+};
+
+module_platform_driver(sti_hdmi_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
new file mode 100644
index 000000000000..61bec6557ceb
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HDMI_H_
+#define _STI_HDMI_H_
+
+#include <linux/platform_device.h>
+
+#include <drm/drmP.h>
+
+#define HDMI_STA 0x0010
+#define HDMI_STA_DLL_LCK BIT(5)
+
+struct sti_hdmi;
+
+struct hdmi_phy_ops {
+ bool (*start)(struct sti_hdmi *hdmi);
+ void (*stop)(struct sti_hdmi *hdmi);
+};
+
+/**
+ * STI hdmi structure
+ *
+ * @dev: driver device
+ * @drm_dev: pointer to drm device
+ * @mode: current display mode selected
+ * @regs: hdmi register
+ * @syscfg: syscfg register for pll rejection configuration
+ * @clk_pix: hdmi pixel clock
+ * @clk_tmds: hdmi tmds clock
+ * @clk_phy: hdmi phy clock
+ * @clk_audio: hdmi audio clock
+ * @irq: hdmi interrupt number
+ * @irq_status: interrupt status register
+ * @phy_ops: phy start/stop operations
+ * @enabled: true if hdmi is enabled else false
+ * @hpd_gpio: hdmi hot plug detect gpio number
+ * @hpd: hot plug detect status
+ * @wait_event: wait event
+ * @event_received: wait event status
+ * @reset: reset control of the hdmi phy
+ */
+struct sti_hdmi {
+ struct device dev;
+ struct drm_device *drm_dev;
+ struct drm_display_mode mode;
+ void __iomem *regs;
+ void __iomem *syscfg;
+ struct clk *clk_pix;
+ struct clk *clk_tmds;
+ struct clk *clk_phy;
+ struct clk *clk_audio;
+ int irq;
+ u32 irq_status;
+ struct hdmi_phy_ops *phy_ops;
+ bool enabled;
+ int hpd_gpio;
+ bool hpd;
+ wait_queue_head_t wait_event;
+ bool event_received;
+ struct reset_control *reset;
+};
+
+u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
+void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset);
+
+/**
+ * hdmi phy config structure
+ *
+ * A pointer to an array of these structures is passed to a TMDS (HDMI) output
+ * via the control interface to provide board and SoC specific
+ * configurations of the HDMI PHY. Each entry in the array specifies a hardware
+ * specific configuration for a given TMDS clock frequency range.
+ *
+ * @min_tmds_freq: Lower bound of TMDS clock frequency this entry applies to
+ * @max_tmds_freq: Upper bound of TMDS clock frequency this entry applies to
+ * @config: SoC specific register configuration
+ */
+struct hdmi_phy_config {
+ u32 min_tmds_freq;
+ u32 max_tmds_freq;
+ u32 config[4];
+};
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c
new file mode 100644
index 000000000000..49ae8e44b285
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "sti_hdmi_tx3g0c55phy.h"
+
+#define HDMI_SRZ_PLL_CFG 0x0504
+#define HDMI_SRZ_TAP_1 0x0508
+#define HDMI_SRZ_TAP_2 0x050C
+#define HDMI_SRZ_TAP_3 0x0510
+#define HDMI_SRZ_CTRL 0x0514
+
+#define HDMI_SRZ_PLL_CFG_POWER_DOWN BIT(0)
+#define HDMI_SRZ_PLL_CFG_VCOR_SHIFT 1
+#define HDMI_SRZ_PLL_CFG_VCOR_425MHZ 0
+#define HDMI_SRZ_PLL_CFG_VCOR_850MHZ 1
+#define HDMI_SRZ_PLL_CFG_VCOR_1700MHZ 2
+#define HDMI_SRZ_PLL_CFG_VCOR_3000MHZ 3
+#define HDMI_SRZ_PLL_CFG_VCOR_MASK 3
+#define HDMI_SRZ_PLL_CFG_VCOR(x) (x << HDMI_SRZ_PLL_CFG_VCOR_SHIFT)
+#define HDMI_SRZ_PLL_CFG_NDIV_SHIFT 8
+#define HDMI_SRZ_PLL_CFG_NDIV_MASK (0x1F << HDMI_SRZ_PLL_CFG_NDIV_SHIFT)
+#define HDMI_SRZ_PLL_CFG_MODE_SHIFT 16
+#define HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ 0x1
+#define HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ 0x4
+#define HDMI_SRZ_PLL_CFG_MODE_27_MHZ 0x5
+#define HDMI_SRZ_PLL_CFG_MODE_33_75_MHZ 0x6
+#define HDMI_SRZ_PLL_CFG_MODE_40_5_MHZ 0x7
+#define HDMI_SRZ_PLL_CFG_MODE_54_MHZ 0x8
+#define HDMI_SRZ_PLL_CFG_MODE_67_5_MHZ 0x9
+#define HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ 0xA
+#define HDMI_SRZ_PLL_CFG_MODE_81_MHZ 0xB
+#define HDMI_SRZ_PLL_CFG_MODE_82_5_MHZ 0xC
+#define HDMI_SRZ_PLL_CFG_MODE_108_MHZ 0xD
+#define HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ 0xE
+#define HDMI_SRZ_PLL_CFG_MODE_165_MHZ 0xF
+#define HDMI_SRZ_PLL_CFG_MODE_MASK 0xF
+#define HDMI_SRZ_PLL_CFG_MODE(x) (x << HDMI_SRZ_PLL_CFG_MODE_SHIFT)
+
+#define HDMI_SRZ_CTRL_POWER_DOWN (1 << 0)
+#define HDMI_SRZ_CTRL_EXTERNAL_DATA_EN (1 << 1)
+
+/* sysconf registers */
+#define HDMI_REJECTION_PLL_CONFIGURATION 0x0858 /* SYSTEM_CONFIG2534 */
+#define HDMI_REJECTION_PLL_STATUS 0x0948 /* SYSTEM_CONFIG2594 */
+
+#define REJECTION_PLL_HDMI_ENABLE_SHIFT 0
+#define REJECTION_PLL_HDMI_ENABLE_MASK (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT)
+#define REJECTION_PLL_HDMI_PDIV_SHIFT 24
+#define REJECTION_PLL_HDMI_PDIV_MASK (0x7 << REJECTION_PLL_HDMI_PDIV_SHIFT)
+#define REJECTION_PLL_HDMI_NDIV_SHIFT 16
+#define REJECTION_PLL_HDMI_NDIV_MASK (0xFF << REJECTION_PLL_HDMI_NDIV_SHIFT)
+#define REJECTION_PLL_HDMI_MDIV_SHIFT 8
+#define REJECTION_PLL_HDMI_MDIV_MASK (0xFF << REJECTION_PLL_HDMI_MDIV_SHIFT)
+
+#define REJECTION_PLL_HDMI_REJ_PLL_LOCK BIT(0)
+
+#define HDMI_TIMEOUT_PLL_LOCK 50 /* milliseconds */
+
+/**
+ * pll mode structure
+ *
+ * Each entry of the pllmodes table below maps a supported pixel clock
+ * frequency range onto the matching serializer PLL mode register value.
+ * A clock that falls outside every listed range ends up using the
+ * generic mode (0), which only supports a 10x multiplier.
+ *
+ * @min: Lower bound of TMDS clock frequency this entry applies to
+ * @max: Upper bound of TMDS clock frequency this entry applies to
+ * @mode: SoC specific register configuration
+ */
+struct pllmode {
+ u32 min;
+ u32 max;
+ u32 mode;
+};
+
+#define NB_PLL_MODE 7
+static struct pllmode pllmodes[NB_PLL_MODE] = {
+ {13500000, 13513500, HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ},
+ {25174800, 25200000, HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ},
+ {27000000, 27027000, HDMI_SRZ_PLL_CFG_MODE_27_MHZ},
+ {54000000, 54054000, HDMI_SRZ_PLL_CFG_MODE_54_MHZ},
+ {72000000, 74250000, HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ},
+ {108000000, 108108000, HDMI_SRZ_PLL_CFG_MODE_108_MHZ},
+ {148351648, 297000000, HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ}
+};
+
+#define NB_HDMI_PHY_CONFIG 5
+static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
+ {0, 40000000, {0x00101010, 0x00101010, 0x00101010, 0x02} },
+ {40000000, 140000000, {0x00111111, 0x00111111, 0x00111111, 0x02} },
+ {140000000, 160000000, {0x00131313, 0x00101010, 0x00101010, 0x02} },
+ {160000000, 250000000, {0x00131313, 0x00111111, 0x00111111, 0x03FE} },
+ {250000000, 300000000, {0x00151515, 0x00101010, 0x00101010, 0x03FE} },
+};
+
+#define PLL_CHANGE_DELAY 1 /* ms */
+
+/**
+ * Disable the pll rejection
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return true if the pll has been disabled
+ */
+static bool disable_pll_rejection(struct sti_hdmi *hdmi)
+{
+ u32 val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
+ val &= ~REJECTION_PLL_HDMI_ENABLE_MASK;
+ writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
+
+ msleep(PLL_CHANGE_DELAY);
+ val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
+
+ return !(val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
+}
+
+/**
+ * Enable the pll rejection
+ *
+ * The old BCH/rejection PLL is reused to provide the CLKPXPLL clock
+ * input to the new PHY PLL that generates the serializer clock
+ * (TMDS*10) and the TMDS clock, which is now fed back into the HDMI
+ * formatter instead of the TMDS clock line from ClockGenB.
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return true if the pll has been correctly set
+ */
+static bool enable_pll_rejection(struct sti_hdmi *hdmi)
+{
+ unsigned int inputclock;
+ u32 mdiv, ndiv, pdiv, val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!disable_pll_rejection(hdmi))
+ return false;
+
+ inputclock = hdmi->mode.clock * 1000;
+
+ DRM_DEBUG_DRIVER("hdmi rejection pll input clock = %dHz\n", inputclock);
+
+ /* Power up the HDMI rejection PLL
+ * Note: On this SoC (stiH416) we are forced to have the input clock
+ * be equal to the HDMI pixel clock.
+ *
+ * The values here have been suggested by validation however they are
+ * still provisional and subject to change.
+ *
+ * PLLout = (Fin*Mdiv) / ((2 * Ndiv) / 2^Pdiv)
+ */
+ if (inputclock < 50000000) {
+ /*
+ * For slower clocks we need to multiply more to keep the
+ * internal VCO frequency within the physical specification
+ * of the PLL.
+ */
+ pdiv = 4;
+ ndiv = 240;
+ mdiv = 30;
+ } else {
+ pdiv = 2;
+ ndiv = 60;
+ mdiv = 30;
+ }
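+
+ /*
+ * Sanity check of the divider settings above: both branches satisfy
+ * (2 * ndiv) / 2^pdiv = 30 = mdiv, so PLLout = Fin in each case.
+ * For example, Fin = 74250000 Hz with pdiv = 2, ndiv = 60, mdiv = 30
+ * gives PLLout = (74.25 MHz * 30) / ((2 * 60) / 2^2) = 74.25 MHz.
+ * The rejection PLL thus reproduces its input clock; the slow-clock
+ * branch simply multiplies more internally (ndiv = 240) to keep the
+ * VCO within its specification, as noted above.
+ */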
+
+ val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
+
+ val &= ~(REJECTION_PLL_HDMI_PDIV_MASK |
+ REJECTION_PLL_HDMI_NDIV_MASK |
+ REJECTION_PLL_HDMI_MDIV_MASK |
+ REJECTION_PLL_HDMI_ENABLE_MASK);
+
+ val |= (pdiv << REJECTION_PLL_HDMI_PDIV_SHIFT) |
+ (ndiv << REJECTION_PLL_HDMI_NDIV_SHIFT) |
+ (mdiv << REJECTION_PLL_HDMI_MDIV_SHIFT) |
+ (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT);
+
+ writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
+
+ msleep(PLL_CHANGE_DELAY);
+ val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
+
+ return (val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
+}
+
+/**
+ * Start hdmi phy macro cell tx3g0c55
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return false if an error occurs
+ */
+static bool sti_hdmi_tx3g0c55phy_start(struct sti_hdmi *hdmi)
+{
+ u32 ckpxpll = hdmi->mode.clock * 1000;
+ u32 val, tmdsck, freqvco, pllctrl = 0;
+ unsigned int i;
+
+ if (!enable_pll_rejection(hdmi))
+ return false;
+
+ DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll);
+
+ /* Assuming no pixel repetition and 24bits color */
+ tmdsck = ckpxpll;
+ pllctrl = 2 << HDMI_SRZ_PLL_CFG_NDIV_SHIFT;
+
+ /*
+ * Setup the PLL mode parameter based on the ckpxpll. If we haven't got
+ * a clock frequency supported by one of the specific PLL modes then we
+ * will end up using the generic mode (0) which only supports a 10x
+ * multiplier, hence only 24bit color.
+ */
+ for (i = 0; i < NB_PLL_MODE; i++) {
+ if (ckpxpll >= pllmodes[i].min && ckpxpll <= pllmodes[i].max)
+ pllctrl |= HDMI_SRZ_PLL_CFG_MODE(pllmodes[i].mode);
+ }
+
+ freqvco = tmdsck * 10;
+ if (freqvco <= 425000000UL)
+ pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_425MHZ);
+ else if (freqvco <= 850000000UL)
+ pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_850MHZ);
+ else if (freqvco <= 1700000000UL)
+ pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_1700MHZ);
+ else if (freqvco <= 2970000000UL)
+ pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_3000MHZ);
+ else {
+ DRM_ERROR("PHY serializer clock out of range\n");
+ goto err;
+ }
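+
+ /*
+ * Worked example: for a 148.5 MHz pixel clock the mode table selects
+ * HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ (0xE) and freqvco = 1.485 GHz falls
+ * into the 1700 MHz VCO range, hence
+ * pllctrl = (2 << 8) | (0xE << 16) | (2 << 1) = 0x000E0204.
+ */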
+
+ /*
+ * Configure and power up the PHY PLL
+ */
+ hdmi->event_received = false;
+ DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl);
+ hdmi_write(hdmi, pllctrl, HDMI_SRZ_PLL_CFG);
+
+ /* wait PLL interrupt */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received == true,
+ msecs_to_jiffies
+ (HDMI_TIMEOUT_PLL_LOCK));
+
+ if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) {
+ DRM_ERROR("hdmi phy pll not locked\n");
+ goto err;
+ }
+
+ DRM_DEBUG_DRIVER("got PHY PLL Lock\n");
+
+ /*
+ * To configure the source termination and pre-emphasis appropriately
+ * for different high speed TMDS clock frequencies a phy configuration
+ * table must be provided, tailored to the SoC and board combination.
+ */
+ for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) {
+ if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) &&
+ (hdmiphy_config[i].max_tmds_freq >= tmdsck)) {
+ val = hdmiphy_config[i].config[0];
+ hdmi_write(hdmi, val, HDMI_SRZ_TAP_1);
+ val = hdmiphy_config[i].config[1];
+ hdmi_write(hdmi, val, HDMI_SRZ_TAP_2);
+ val = hdmiphy_config[i].config[2];
+ hdmi_write(hdmi, val, HDMI_SRZ_TAP_3);
+ val = hdmiphy_config[i].config[3];
+ val |= HDMI_SRZ_CTRL_EXTERNAL_DATA_EN;
+ val &= ~HDMI_SRZ_CTRL_POWER_DOWN;
+ hdmi_write(hdmi, val, HDMI_SRZ_CTRL);
+
+ DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x 0x%x\n",
+ hdmiphy_config[i].config[0],
+ hdmiphy_config[i].config[1],
+ hdmiphy_config[i].config[2],
+ hdmiphy_config[i].config[3]);
+ return true;
+ }
+ }
+
+ /*
+ * By default, power up the serializer with no pre-emphasis or source
+ * termination.
+ */
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_1);
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_2);
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_3);
+ hdmi_write(hdmi, HDMI_SRZ_CTRL_EXTERNAL_DATA_EN, HDMI_SRZ_CTRL);
+
+ return true;
+
+err:
+ disable_pll_rejection(hdmi);
+
+ return false;
+}
+
+/**
+ * Stop hdmi phy macro cell tx3g0c55
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+static void sti_hdmi_tx3g0c55phy_stop(struct sti_hdmi *hdmi)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ hdmi->event_received = false;
+
+ hdmi_write(hdmi, HDMI_SRZ_CTRL_POWER_DOWN, HDMI_SRZ_CTRL);
+ hdmi_write(hdmi, HDMI_SRZ_PLL_CFG_POWER_DOWN, HDMI_SRZ_PLL_CFG);
+
+ /* wait PLL interrupt */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received == true,
+ msecs_to_jiffies
+ (HDMI_TIMEOUT_PLL_LOCK));
+
+ if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK)
+ DRM_ERROR("hdmi phy pll not well disabled\n");
+
+ disable_pll_rejection(hdmi);
+}
+
+struct hdmi_phy_ops tx3g0c55phy_ops = {
+ .start = sti_hdmi_tx3g0c55phy_start,
+ .stop = sti_hdmi_tx3g0c55phy_stop,
+};
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h
new file mode 100644
index 000000000000..068237b3a303
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HDMI_TX3G0C55PHY_H_
+#define _STI_HDMI_TX3G0C55PHY_H_
+
+#include "sti_hdmi.h"
+
+extern struct hdmi_phy_ops tx3g0c55phy_ops;
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
new file mode 100644
index 000000000000..8e0ceb0ced33
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "sti_hdmi_tx3g4c28phy.h"
+
+#define HDMI_SRZ_CFG 0x504
+#define HDMI_SRZ_PLL_CFG 0x510
+#define HDMI_SRZ_ICNTL 0x518
+#define HDMI_SRZ_CALCODE_EXT 0x520
+
+#define HDMI_SRZ_CFG_EN BIT(0)
+#define HDMI_SRZ_CFG_DISABLE_BYPASS_SINK_CURRENT BIT(1)
+#define HDMI_SRZ_CFG_EXTERNAL_DATA BIT(16)
+#define HDMI_SRZ_CFG_RBIAS_EXT BIT(17)
+#define HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION BIT(18)
+#define HDMI_SRZ_CFG_EN_BIASRES_DETECTION BIT(19)
+#define HDMI_SRZ_CFG_EN_SRC_TERMINATION BIT(24)
+
+#define HDMI_SRZ_CFG_INTERNAL_MASK (HDMI_SRZ_CFG_EN | \
+ HDMI_SRZ_CFG_DISABLE_BYPASS_SINK_CURRENT | \
+ HDMI_SRZ_CFG_EXTERNAL_DATA | \
+ HDMI_SRZ_CFG_RBIAS_EXT | \
+ HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION | \
+ HDMI_SRZ_CFG_EN_BIASRES_DETECTION | \
+ HDMI_SRZ_CFG_EN_SRC_TERMINATION)
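+
+/*
+ * HDMI_SRZ_CFG_INTERNAL_MASK covers the bits the driver manages itself;
+ * board-specific bits taken from hdmi_phy_config.config[0] are OR-ed in
+ * with this mask cleared. For instance, a config[0] of 0x1110 (bits 4, 8
+ * and 12) does not overlap the mask and is applied unchanged.
+ */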
+
+#define PLL_CFG_EN BIT(0)
+#define PLL_CFG_NDIV_SHIFT (8)
+#define PLL_CFG_IDF_SHIFT (16)
+#define PLL_CFG_ODF_SHIFT (24)
+
+#define ODF_DIV_1 (0)
+#define ODF_DIV_2 (1)
+#define ODF_DIV_4 (2)
+#define ODF_DIV_8 (3)
+
+#define HDMI_TIMEOUT_PLL_LOCK 50 /* milliseconds */
+
+struct plldividers_s {
+ uint32_t min;
+ uint32_t max;
+ uint32_t idf;
+ uint32_t odf;
+};
+
+/*
+ * Functional specification recommended values
+ */
+#define NB_PLL_MODE 5
+static struct plldividers_s plldividers[NB_PLL_MODE] = {
+ {0, 20000000, 1, ODF_DIV_8},
+ {20000000, 42500000, 2, ODF_DIV_8},
+ {42500000, 85000000, 4, ODF_DIV_4},
+ {85000000, 170000000, 8, ODF_DIV_2},
+ {170000000, 340000000, 16, ODF_DIV_1}
+};
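+
+/*
+ * Across the table, ckpxpll / idf stays below roughly 21.25 MHz while
+ * idf * odf is (almost) constant, which presumably keeps the PLL
+ * reference and VCO frequencies within their working band whatever the
+ * pixel clock; the fixed ndiv of 40 is applied in the start function.
+ */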
+
+#define NB_HDMI_PHY_CONFIG 2
+static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
+ {0, 250000000, {0x0, 0x0, 0x0, 0x0} },
+ {250000000, 300000000, {0x1110, 0x0, 0x0, 0x0} },
+};
+
+/**
+ * Start hdmi phy macro cell tx3g4c28
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return false if an error occurs
+ */
+static bool sti_hdmi_tx3g4c28phy_start(struct sti_hdmi *hdmi)
+{
+ u32 ckpxpll = hdmi->mode.clock * 1000;
+ u32 val, tmdsck, idf, odf, pllctrl = 0;
+ bool foundplldivides = false;
+ int i;
+
+ DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll);
+
+ for (i = 0; i < NB_PLL_MODE; i++) {
+ if (ckpxpll >= plldividers[i].min &&
+ ckpxpll < plldividers[i].max) {
+ idf = plldividers[i].idf;
+ odf = plldividers[i].odf;
+ foundplldivides = true;
+ break;
+ }
+ }
+
+ if (!foundplldivides) {
+ DRM_ERROR("input TMDS clock speed (%d) not supported\n",
+ ckpxpll);
+ goto err;
+ }
+
+ /* Assuming no pixel repetition and 24bits color */
+ tmdsck = ckpxpll;
+ pllctrl |= 40 << PLL_CFG_NDIV_SHIFT;
+
+ if (tmdsck > 340000000) {
+ DRM_ERROR("output TMDS clock (%d) out of range\n", tmdsck);
+ goto err;
+ }
+
+ pllctrl |= idf << PLL_CFG_IDF_SHIFT;
+ pllctrl |= odf << PLL_CFG_ODF_SHIFT;
+
+ /*
+ * Configure and power up the PHY PLL
+ */
+ hdmi->event_received = false;
+ DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl);
+ hdmi_write(hdmi, (pllctrl | PLL_CFG_EN), HDMI_SRZ_PLL_CFG);
+
+ /* wait PLL interrupt */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received == true,
+ msecs_to_jiffies
+ (HDMI_TIMEOUT_PLL_LOCK));
+
+ if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) {
+ DRM_ERROR("hdmi phy pll not locked\n");
+ goto err;
+ }
+
+ DRM_DEBUG_DRIVER("got PHY PLL Lock\n");
+
+ val = (HDMI_SRZ_CFG_EN |
+ HDMI_SRZ_CFG_EXTERNAL_DATA |
+ HDMI_SRZ_CFG_EN_BIASRES_DETECTION |
+ HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION);
+
+ if (tmdsck > 165000000)
+ val |= HDMI_SRZ_CFG_EN_SRC_TERMINATION;
+
+ /*
+ * To configure the source termination and pre-emphasis appropriately
+ * for different high speed TMDS clock frequencies a phy configuration
+ * table must be provided, tailored to the SoC and board combination.
+ */
+ for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) {
+ if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) &&
+ (hdmiphy_config[i].max_tmds_freq >= tmdsck)) {
+ val |= (hdmiphy_config[i].config[0]
+ & ~HDMI_SRZ_CFG_INTERNAL_MASK);
+ hdmi_write(hdmi, val, HDMI_SRZ_CFG);
+
+ val = hdmiphy_config[i].config[1];
+ hdmi_write(hdmi, val, HDMI_SRZ_ICNTL);
+
+ val = hdmiphy_config[i].config[2];
+ hdmi_write(hdmi, val, HDMI_SRZ_CALCODE_EXT);
+
+ DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x\n",
+ hdmiphy_config[i].config[0],
+ hdmiphy_config[i].config[1],
+ hdmiphy_config[i].config[2]);
+ return true;
+ }
+ }
+
+ /*
+ * By default, power up the serializer with no pre-emphasis or
+ * output swing correction
+ */
+ hdmi_write(hdmi, val, HDMI_SRZ_CFG);
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_ICNTL);
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_CALCODE_EXT);
+
+ return true;
+
+err:
+ return false;
+}
+
+/**
+ * Stop hdmi phy macro cell tx3g4c28
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+static void sti_hdmi_tx3g4c28phy_stop(struct sti_hdmi *hdmi)
+{
+ int val = 0;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ hdmi->event_received = false;
+
+ val = HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION;
+ val |= HDMI_SRZ_CFG_EN_BIASRES_DETECTION;
+
+ hdmi_write(hdmi, val, HDMI_SRZ_CFG);
+ hdmi_write(hdmi, 0, HDMI_SRZ_PLL_CFG);
+
+ /* wait PLL interrupt */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received == true,
+ msecs_to_jiffies
+ (HDMI_TIMEOUT_PLL_LOCK));
+
+ if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK)
+ DRM_ERROR("hdmi phy pll not well disabled\n");
+}
+
+struct hdmi_phy_ops tx3g4c28phy_ops = {
+ .start = sti_hdmi_tx3g4c28phy_start,
+ .stop = sti_hdmi_tx3g4c28phy_stop,
+};
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h
new file mode 100644
index 000000000000..f99a7ff281ef
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HDMI_TX3G4C28PHY_H_
+#define _STI_HDMI_TX3G4C28PHY_H_
+
+#include "sti_hdmi.h"
+
+extern struct hdmi_phy_ops tx3g4c28phy_ops;
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
new file mode 100644
index 000000000000..06a587c4f1bb
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_layer.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <[email protected]>
+ * Fabien Dessenne <[email protected]>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "sti_compositor.h"
+#include "sti_gdp.h"
+#include "sti_layer.h"
+#include "sti_vid.h"
+
+const char *sti_layer_to_str(struct sti_layer *layer)
+{
+ switch (layer->desc) {
+ case STI_GDP_0:
+ return "GDP0";
+ case STI_GDP_1:
+ return "GDP1";
+ case STI_GDP_2:
+ return "GDP2";
+ case STI_GDP_3:
+ return "GDP3";
+ case STI_VID_0:
+ return "VID0";
+ case STI_VID_1:
+ return "VID1";
+ case STI_CURSOR:
+ return "CURSOR";
+ default:
+ return "<UNKNOWN LAYER>";
+ }
+}
+
+struct sti_layer *sti_layer_create(struct device *dev, int desc,
+ void __iomem *baseaddr)
+{
+
+ struct sti_layer *layer = NULL;
+
+ switch (desc & STI_LAYER_TYPE_MASK) {
+ case STI_GDP:
+ layer = sti_gdp_create(dev, desc);
+ break;
+ case STI_VID:
+ layer = sti_vid_create(dev);
+ break;
+ }
+
+ if (!layer) {
+ DRM_ERROR("Failed to create layer\n");
+ return NULL;
+ }
+
+ layer->desc = desc;
+ layer->dev = dev;
+ layer->regs = baseaddr;
+
+ layer->ops->init(layer);
+
+ DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer));
+
+ return layer;
+}
+
+int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
+ struct drm_display_mode *mode, int mixer_id,
+ int dest_x, int dest_y, int dest_w, int dest_h,
+ int src_x, int src_y, int src_w, int src_h)
+{
+ int ret;
+ unsigned int i;
+ struct drm_gem_cma_object *cma_obj;
+
+ if (!layer || !fb || !mode) {
+ DRM_ERROR("Null fb, layer or mode\n");
+ return 1;
+ }
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!cma_obj) {
+ DRM_ERROR("Can't get CMA GEM object for fb\n");
+ return 1;
+ }
+
+ layer->fb = fb;
+ layer->mode = mode;
+ layer->mixer_id = mixer_id;
+ layer->dst_x = dest_x;
+ layer->dst_y = dest_y;
+ layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
+ layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
+ layer->src_x = src_x;
+ layer->src_y = src_y;
+ layer->src_w = src_w;
+ layer->src_h = src_h;
+ layer->format = fb->pixel_format;
+ layer->paddr = cma_obj->paddr;
+ for (i = 0; i < 4; i++) {
+ layer->pitches[i] = fb->pitches[i];
+ layer->offsets[i] = fb->offsets[i];
+ }
+
+ DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
+ sti_layer_to_str(layer),
+ layer->mixer_id);
+ DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+ sti_layer_to_str(layer),
+ layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y,
+ layer->src_w, layer->src_h, layer->src_x,
+ layer->src_y);
+
+ DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+ (char *)&layer->format, (unsigned long)layer->paddr);
+
+ if (!layer->ops->prepare)
+ goto err_no_prepare;
+
+ ret = layer->ops->prepare(layer, !layer->enabled);
+ if (!ret)
+ layer->enabled = true;
+
+ return ret;
+
+err_no_prepare:
+ DRM_ERROR("Cannot prepare\n");
+ return 1;
+}
+
+int sti_layer_commit(struct sti_layer *layer)
+{
+ if (!layer)
+ return 1;
+
+ if (!layer->ops->commit)
+ goto err_no_commit;
+
+ return layer->ops->commit(layer);
+
+err_no_commit:
+ DRM_ERROR("Cannot commit\n");
+ return 1;
+}
+
+int sti_layer_disable(struct sti_layer *layer)
+{
+ int ret;
+
+ DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+ if (!layer)
+ return 1;
+
+ if (!layer->enabled)
+ return 0;
+
+ if (!layer->ops->disable)
+ goto err_no_disable;
+
+ ret = layer->ops->disable(layer);
+ if (!ret)
+ layer->enabled = false;
+ else
+ DRM_ERROR("Disable failed\n");
+
+ return ret;
+
+err_no_disable:
+ DRM_ERROR("Cannot disable\n");
+ return 1;
+}
+
+const uint32_t *sti_layer_get_formats(struct sti_layer *layer)
+{
+ if (!layer)
+ return NULL;
+
+ if (!layer->ops->get_formats)
+ return NULL;
+
+ return layer->ops->get_formats(layer);
+}
+
+unsigned int sti_layer_get_nb_formats(struct sti_layer *layer)
+{
+ if (!layer)
+ return 0;
+
+ if (!layer->ops->get_nb_formats)
+ return 0;
+
+ return layer->ops->get_nb_formats(layer);
+}
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
new file mode 100644
index 000000000000..198c3774cc12
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_layer.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <[email protected]>
+ * Fabien Dessenne <[email protected]>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_LAYER_H_
+#define _STI_LAYER_H_
+
+#include <drm/drmP.h>
+
+#define to_sti_layer(x) container_of(x, struct sti_layer, plane)
+
+#define STI_LAYER_TYPE_SHIFT 8
+#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1))
+
+struct sti_layer;
+
+enum sti_layer_type {
+ STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
+ STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
+ STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
+ STI_BCK = 4 << STI_LAYER_TYPE_SHIFT
+};
+
+enum sti_layer_id_of_type {
+ STI_ID_0 = 0,
+ STI_ID_1 = 1,
+ STI_ID_2 = 2,
+ STI_ID_3 = 3
+};
+
+enum sti_layer_desc {
+ STI_GDP_0 = STI_GDP | STI_ID_0,
+ STI_GDP_1 = STI_GDP | STI_ID_1,
+ STI_GDP_2 = STI_GDP | STI_ID_2,
+ STI_GDP_3 = STI_GDP | STI_ID_3,
+ STI_VID_0 = STI_VID | STI_ID_0,
+ STI_VID_1 = STI_VID | STI_ID_1,
+ STI_CURSOR = STI_CUR,
+ STI_BACK = STI_BCK
+};
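+
+/*
+ * A layer descriptor encodes the type in the high bits and the instance
+ * id in the low bits: e.g. STI_GDP_2 = (1 << 8) | 2 = 0x102, and
+ * (STI_GDP_2 & STI_LAYER_TYPE_MASK) == STI_GDP recovers the type.
+ */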
+
+/**
+ * STI layer functions structure
+ *
+ * @get_formats: get layer supported formats
+ * @get_nb_formats: get number of format supported
+ * @init: initialize the layer
+ * @prepare: prepare layer before rendering
+ * @commit: set layer for rendering
+ * @disable: disable layer
+ */
+struct sti_layer_funcs {
+ const uint32_t* (*get_formats)(struct sti_layer *layer);
+ unsigned int (*get_nb_formats)(struct sti_layer *layer);
+ void (*init)(struct sti_layer *layer);
+ int (*prepare)(struct sti_layer *layer, bool first_prepare);
+ int (*commit)(struct sti_layer *layer);
+ int (*disable)(struct sti_layer *layer);
+};
+
+/**
+ * STI layer structure
+ *
+ * @plane: drm plane it is bound to (if any)
+ * @fb: drm fb it is bound to
+ * @mode: display mode
+ * @desc: layer type & id
+ * @dev: driver device
+ * @regs: layer registers
+ * @ops: layer functions
+ * @zorder: layer z-order
+ * @mixer_id: id of the mixer used to display the layer
+ * @enabled: to know if the layer is active or not
+ * @src_x src_y: coordinates of the input (fb) area
+ * @src_w src_h: size of the input (fb) area
+ * @dst_x dst_y: coordinates of the output (crtc) area
+ * @dst_w dst_h: size of the output (crtc) area
+ * @format: pixel format of the framebuffer
+ * @pitches: pitch of 'planes' (eg: Y, U, V)
+ * @offsets: offset of 'planes'
+ * @paddr: physical address of the input buffer
+ */
+struct sti_layer {
+ struct drm_plane plane;
+ struct drm_framebuffer *fb;
+ struct drm_display_mode *mode;
+ enum sti_layer_desc desc;
+ struct device *dev;
+ void __iomem *regs;
+ const struct sti_layer_funcs *ops;
+ int zorder;
+ int mixer_id;
+ bool enabled;
+ int src_x, src_y;
+ int src_w, src_h;
+ int dst_x, dst_y;
+ int dst_w, dst_h;
+ uint32_t format;
+ unsigned int pitches[4];
+ unsigned int offsets[4];
+ dma_addr_t paddr;
+};
+
+struct sti_layer *sti_layer_create(struct device *dev, int desc,
+ void __iomem *baseaddr);
+int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ int mixer_id,
+ int dest_x, int dest_y,
+ int dest_w, int dest_h,
+ int src_x, int src_y,
+ int src_w, int src_h);
+int sti_layer_commit(struct sti_layer *layer);
+int sti_layer_disable(struct sti_layer *layer);
+const uint32_t *sti_layer_get_formats(struct sti_layer *layer);
+unsigned int sti_layer_get_nb_formats(struct sti_layer *layer);
+const char *sti_layer_to_str(struct sti_layer *layer);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
new file mode 100644
index 000000000000..79f369db9fb6
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <[email protected]>
+ * Fabien Dessenne <[email protected]>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "sti_compositor.h"
+#include "sti_mixer.h"
+#include "sti_vtg.h"
+
+/* Identity: G=Y , B=Cb , R=Cr */
+static const u32 mixerColorSpaceMatIdentity[] = {
+ 0x10000000, 0x00000000, 0x10000000, 0x00001000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000
+};
+
+/* regs offset */
+#define GAM_MIXER_CTL 0x00
+#define GAM_MIXER_BKC 0x04
+#define GAM_MIXER_BCO 0x0C
+#define GAM_MIXER_BCS 0x10
+#define GAM_MIXER_AVO 0x28
+#define GAM_MIXER_AVS 0x2C
+#define GAM_MIXER_CRB 0x34
+#define GAM_MIXER_ACT 0x38
+#define GAM_MIXER_MBP 0x3C
+#define GAM_MIXER_MX0 0x80
+
+/* id for depth of CRB reg */
+#define GAM_DEPTH_VID0_ID 1
+#define GAM_DEPTH_VID1_ID 2
+#define GAM_DEPTH_GDP0_ID 3
+#define GAM_DEPTH_GDP1_ID 4
+#define GAM_DEPTH_GDP2_ID 5
+#define GAM_DEPTH_GDP3_ID 6
+#define GAM_DEPTH_MASK_ID 7
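+
+/*
+ * Each depth level is a 3-bit field of GAM_MIXER_CRB: depth n occupies
+ * bits [3n+2:3n]. Placing GDP0 (id 3) at depth 2, for instance, writes
+ * the value 3 << 6 under the mask GAM_DEPTH_MASK_ID << 6 (see
+ * sti_mixer_set_layer_depth() below).
+ */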
+
+/* mask in CTL reg */
+#define GAM_CTL_BACK_MASK BIT(0)
+#define GAM_CTL_VID0_MASK BIT(1)
+#define GAM_CTL_VID1_MASK BIT(2)
+#define GAM_CTL_GDP0_MASK BIT(3)
+#define GAM_CTL_GDP1_MASK BIT(4)
+#define GAM_CTL_GDP2_MASK BIT(5)
+#define GAM_CTL_GDP3_MASK BIT(6)
+
+const char *sti_mixer_to_str(struct sti_mixer *mixer)
+{
+ switch (mixer->id) {
+ case STI_MIXER_MAIN:
+ return "MAIN_MIXER";
+ case STI_MIXER_AUX:
+ return "AUX_MIXER";
+ default:
+ return "<UNKNOWN MIXER>";
+ }
+}
+
+static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id)
+{
+ return readl(mixer->regs + reg_id);
+}
+
+static inline void sti_mixer_reg_write(struct sti_mixer *mixer,
+ u32 reg_id, u32 val)
+{
+ writel(val, mixer->regs + reg_id);
+}
+
+void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
+{
+ u32 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
+
+ val &= ~GAM_CTL_BACK_MASK;
+ val |= enable;
+ sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
+}
+
+static void sti_mixer_set_background_color(struct sti_mixer *mixer,
+ u8 red, u8 green, u8 blue)
+{
+ u32 val = (red << 16) | (green << 8) | blue;
+
+ sti_mixer_reg_write(mixer, GAM_MIXER_BKC, val);
+}
+
+static void sti_mixer_set_background_area(struct sti_mixer *mixer,
+ struct drm_display_mode *mode)
+{
+ u32 ydo, xdo, yds, xds;
+
+ ydo = sti_vtg_get_line_number(*mode, 0);
+ yds = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, 0);
+ xds = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
+
+ sti_mixer_reg_write(mixer, GAM_MIXER_BCO, ydo << 16 | xdo);
+ sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
+}
+
+int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
+{
+ int layer_id = 0, depth = layer->zorder;
+ u32 mask, val;
+
+ if (depth >= GAM_MIXER_NB_DEPTH_LEVEL)
+ return 1;
+
+ switch (layer->desc) {
+ case STI_GDP_0:
+ layer_id = GAM_DEPTH_GDP0_ID;
+ break;
+ case STI_GDP_1:
+ layer_id = GAM_DEPTH_GDP1_ID;
+ break;
+ case STI_GDP_2:
+ layer_id = GAM_DEPTH_GDP2_ID;
+ break;
+ case STI_GDP_3:
+ layer_id = GAM_DEPTH_GDP3_ID;
+ break;
+ case STI_VID_0:
+ layer_id = GAM_DEPTH_VID0_ID;
+ break;
+ case STI_VID_1:
+ layer_id = GAM_DEPTH_VID1_ID;
+ break;
+ default:
+ DRM_ERROR("Unknown layer %d\n", layer->desc);
+ return 1;
+ }
+ mask = GAM_DEPTH_MASK_ID << (3 * depth);
+ layer_id = layer_id << (3 * depth);
+
+ DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
+ sti_layer_to_str(layer), depth);
+ dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
+ layer_id, mask);
+
+ val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
+ val &= ~mask;
+ val |= layer_id;
+ sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
+
+ dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
+ sti_mixer_reg_read(mixer, GAM_MIXER_CRB));
+ return 0;
+}
+
+int sti_mixer_active_video_area(struct sti_mixer *mixer,
+ struct drm_display_mode *mode)
+{
+ u32 ydo, xdo, yds, xds;
+
+ ydo = sti_vtg_get_line_number(*mode, 0);
+ yds = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, 0);
+ xds = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
+
+ DRM_DEBUG_DRIVER("%s active video area xdo:%d ydo:%d xds:%d yds:%d\n",
+ sti_mixer_to_str(mixer), xdo, ydo, xds, yds);
+ sti_mixer_reg_write(mixer, GAM_MIXER_AVO, ydo << 16 | xdo);
+ sti_mixer_reg_write(mixer, GAM_MIXER_AVS, yds << 16 | xds);
+
+ sti_mixer_set_background_color(mixer, 0xFF, 0, 0);
+
+ sti_mixer_set_background_area(mixer, mode);
+ sti_mixer_set_background_status(mixer, true);
+ return 0;
+}
+
+static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
+{
+ switch (layer->desc) {
+ case STI_BACK:
+ return GAM_CTL_BACK_MASK;
+ case STI_GDP_0:
+ return GAM_CTL_GDP0_MASK;
+ case STI_GDP_1:
+ return GAM_CTL_GDP1_MASK;
+ case STI_GDP_2:
+ return GAM_CTL_GDP2_MASK;
+ case STI_GDP_3:
+ return GAM_CTL_GDP3_MASK;
+ case STI_VID_0:
+ return GAM_CTL_VID0_MASK;
+ case STI_VID_1:
+ return GAM_CTL_VID1_MASK;
+ default:
+ return 0;
+ }
+}
+
+int sti_mixer_set_layer_status(struct sti_mixer *mixer,
+ struct sti_layer *layer, bool status)
+{
+ u32 mask, val;
+
+ DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
+ sti_mixer_to_str(mixer), sti_layer_to_str(layer));
+
+ mask = sti_mixer_get_layer_mask(layer);
+ if (!mask) {
+ DRM_ERROR("Can not find layer mask\n");
+ return -EINVAL;
+ }
+
+ val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
+ val &= ~mask;
+ val |= status ? mask : 0;
+ sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
+
+ return 0;
+}
+
+void sti_mixer_set_matrix(struct sti_mixer *mixer)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mixerColorSpaceMatIdentity); i++)
+ sti_mixer_reg_write(mixer, GAM_MIXER_MX0 + (i * 4),
+ mixerColorSpaceMatIdentity[i]);
+}
+
+struct sti_mixer *sti_mixer_create(struct device *dev, int id,
+ void __iomem *baseaddr)
+{
+ struct sti_mixer *mixer = devm_kzalloc(dev, sizeof(*mixer), GFP_KERNEL);
+ struct device_node *np = dev->of_node;
+
+ dev_dbg(dev, "%s\n", __func__);
+ if (!mixer) {
+ DRM_ERROR("Failed to allocated memory for mixer\n");
+ return NULL;
+ }
+ mixer->regs = baseaddr;
+ mixer->dev = dev;
+ mixer->id = id;
+
+ if (of_device_is_compatible(np, "st,stih416-compositor"))
+ sti_mixer_set_matrix(mixer);
+
+ DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
+ sti_mixer_to_str(mixer), mixer->regs);
+
+ return mixer;
+}
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
new file mode 100644
index 000000000000..874372102e52
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <[email protected]>
+ * Fabien Dessenne <[email protected]>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_MIXER_H_
+#define _STI_MIXER_H_
+
+#include <drm/drmP.h>
+
+#include "sti_layer.h"
+
+#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
+
+/**
+ * STI Mixer subdevice structure
+ *
+ * @dev: driver device
+ * @regs: mixer registers
+ * @id: id of the mixer
+ * @drm_crtc: crtc object link to the mixer
+ * @pending_event: set if a flip event is pending on crtc
+ */
+struct sti_mixer {
+ struct device *dev;
+ void __iomem *regs;
+ int id;
+ struct drm_crtc drm_crtc;
+ struct drm_pending_vblank_event *pending_event;
+};
+
+const char *sti_mixer_to_str(struct sti_mixer *mixer);
+
+struct sti_mixer *sti_mixer_create(struct device *dev, int id,
+ void __iomem *baseaddr);
+
+int sti_mixer_set_layer_status(struct sti_mixer *mixer,
+ struct sti_layer *layer, bool status);
+int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
+int sti_mixer_active_video_area(struct sti_mixer *mixer,
+ struct drm_display_mode *mode);
+
+void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
+
+/* depth in Cross-bar control = z order */
+#define GAM_MIXER_NB_DEPTH_LEVEL 7
+
+#define STI_MIXER_MAIN 0
+#define STI_MIXER_AUX 1
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
new file mode 100644
index 000000000000..b69e26fee76e
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -0,0 +1,648 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <[email protected]>
+ * Vincent Abriou <[email protected]>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+/* glue registers */
+#define TVO_CSC_MAIN_M0 0x000
+#define TVO_CSC_MAIN_M1 0x004
+#define TVO_CSC_MAIN_M2 0x008
+#define TVO_CSC_MAIN_M3 0x00c
+#define TVO_CSC_MAIN_M4 0x010
+#define TVO_CSC_MAIN_M5 0x014
+#define TVO_CSC_MAIN_M6 0x018
+#define TVO_CSC_MAIN_M7 0x01c
+#define TVO_MAIN_IN_VID_FORMAT 0x030
+#define TVO_CSC_AUX_M0 0x100
+#define TVO_CSC_AUX_M1 0x104
+#define TVO_CSC_AUX_M2 0x108
+#define TVO_CSC_AUX_M3 0x10c
+#define TVO_CSC_AUX_M4 0x110
+#define TVO_CSC_AUX_M5 0x114
+#define TVO_CSC_AUX_M6 0x118
+#define TVO_CSC_AUX_M7 0x11c
+#define TVO_AUX_IN_VID_FORMAT 0x130
+#define TVO_VIP_HDF 0x400
+#define TVO_HD_SYNC_SEL 0x418
+#define TVO_HD_DAC_CFG_OFF 0x420
+#define TVO_VIP_HDMI 0x500
+#define TVO_HDMI_FORCE_COLOR_0 0x504
+#define TVO_HDMI_FORCE_COLOR_1 0x508
+#define TVO_HDMI_CLIP_VALUE_B_CB 0x50c
+#define TVO_HDMI_CLIP_VALUE_Y_G 0x510
+#define TVO_HDMI_CLIP_VALUE_R_CR 0x514
+#define TVO_HDMI_SYNC_SEL 0x518
+#define TVO_HDMI_DFV_OBS 0x540
+
+#define TVO_IN_FMT_SIGNED BIT(0)
+#define TVO_SYNC_EXT BIT(4)
+
+#define TVO_VIP_REORDER_R_SHIFT 24
+#define TVO_VIP_REORDER_G_SHIFT 20
+#define TVO_VIP_REORDER_B_SHIFT 16
+#define TVO_VIP_REORDER_MASK 0x3
+#define TVO_VIP_REORDER_Y_G_SEL 0
+#define TVO_VIP_REORDER_CB_B_SEL 1
+#define TVO_VIP_REORDER_CR_R_SEL 2
+
+#define TVO_VIP_CLIP_SHIFT 8
+#define TVO_VIP_CLIP_MASK 0x7
+#define TVO_VIP_CLIP_DISABLED 0
+#define TVO_VIP_CLIP_EAV_SAV 1
+#define TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y 2
+#define TVO_VIP_CLIP_LIMITED_RANGE_CB_CR 3
+#define TVO_VIP_CLIP_PROG_RANGE 4
+
+#define TVO_VIP_RND_SHIFT 4
+#define TVO_VIP_RND_MASK 0x3
+#define TVO_VIP_RND_8BIT_ROUNDED 0
+#define TVO_VIP_RND_10BIT_ROUNDED 1
+#define TVO_VIP_RND_12BIT_ROUNDED 2
+
+#define TVO_VIP_SEL_INPUT_MASK 0xf
+#define TVO_VIP_SEL_INPUT_MAIN 0x0
+#define TVO_VIP_SEL_INPUT_AUX 0x8
+#define TVO_VIP_SEL_INPUT_FORCE_COLOR 0xf
+#define TVO_VIP_SEL_INPUT_BYPASS_MASK 0x1
+#define TVO_VIP_SEL_INPUT_BYPASSED 1
+
+#define TVO_SYNC_MAIN_VTG_SET_REF 0x00
+#define TVO_SYNC_MAIN_VTG_SET_1 0x01
+#define TVO_SYNC_MAIN_VTG_SET_2 0x02
+#define TVO_SYNC_MAIN_VTG_SET_3 0x03
+#define TVO_SYNC_MAIN_VTG_SET_4 0x04
+#define TVO_SYNC_MAIN_VTG_SET_5 0x05
+#define TVO_SYNC_MAIN_VTG_SET_6 0x06
+#define TVO_SYNC_AUX_VTG_SET_REF 0x10
+#define TVO_SYNC_AUX_VTG_SET_1 0x11
+#define TVO_SYNC_AUX_VTG_SET_2 0x12
+#define TVO_SYNC_AUX_VTG_SET_3 0x13
+#define TVO_SYNC_AUX_VTG_SET_4 0x14
+#define TVO_SYNC_AUX_VTG_SET_5 0x15
+#define TVO_SYNC_AUX_VTG_SET_6 0x16
+
+#define TVO_SYNC_HD_DCS_SHIFT 8
+
+#define ENCODER_MAIN_CRTC_MASK BIT(0)
+
+/* enum listing the supported output data format */
+enum sti_tvout_video_out_type {
+ STI_TVOUT_VIDEO_OUT_RGB,
+ STI_TVOUT_VIDEO_OUT_YUV,
+};
+
+struct sti_tvout {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ void __iomem *regs;
+ struct reset_control *reset;
+ struct drm_encoder *hdmi;
+ struct drm_encoder *hda;
+};
+
+struct sti_tvout_encoder {
+ struct drm_encoder encoder;
+ struct sti_tvout *tvout;
+};
+
+#define to_sti_tvout_encoder(x) \
+ container_of(x, struct sti_tvout_encoder, encoder)
+
+#define to_sti_tvout(x) to_sti_tvout_encoder(x)->tvout
+
+/* preformatter conversion matrix */
+static const u32 rgb_to_ycbcr_601[8] = {
+ 0xF927082E, 0x04C9FEAB, 0x01D30964, 0xFA95FD3D,
+ 0x0000082E, 0x00002000, 0x00002000, 0x00000000
+};
+
+/* 709 RGB to YCbCr */
+static const u32 rgb_to_ycbcr_709[8] = {
+ 0xF891082F, 0x0367FF40, 0x01280B71, 0xF9B1FE20,
+ 0x0000082F, 0x00002000, 0x00002000, 0x00000000
+};
+
+static u32 tvout_read(struct sti_tvout *tvout, int offset)
+{
+ return readl(tvout->regs + offset);
+}
+
+static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
+{
+ writel(val, tvout->regs + offset);
+}
+
+/**
+ * Set the color channel order of a VIP
+ *
+ * @tvout: tvout structure
+ * @reg: VIP register to set (HDMI or HDF)
+ * @cr_r: position selector for the Cr/R channel
+ * @y_g: position selector for the Y/G channel
+ * @cb_b: position selector for the Cb/B channel
+ */
+static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
+ u32 cr_r, u32 y_g, u32 cb_b)
+{
+ u32 val = tvout_read(tvout, reg);
+
+ val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT);
+ val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT);
+ val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_B_SHIFT);
+ val |= cr_r << TVO_VIP_REORDER_R_SHIFT;
+ val |= y_g << TVO_VIP_REORDER_G_SHIFT;
+ val |= cb_b << TVO_VIP_REORDER_B_SHIFT;
+
+ tvout_write(tvout, val, reg);
+}
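+
+/*
+ * With the ordering used by the start functions below (Cr/R on R, Y/G
+ * on G, Cb/B on B), the reorder fields of the VIP register become
+ * (2 << 24) | (0 << 20) | (1 << 16) = 0x02010000.
+ */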
+
+/**
+ * Set the clipping mode of a VIP
+ *
+ * @tvout: tvout structure
+ * @reg: VIP register to set (HDMI or HDF)
+ * @range: clipping range
+ */
+static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg,
+ u32 range)
+{
+ u32 val = tvout_read(tvout, reg);
+
+ val &= ~(TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT);
+ val |= range << TVO_VIP_CLIP_SHIFT;
+ tvout_write(tvout, val, reg);
+}
+
+/**
+ * Set the rounded value of a VIP
+ *
+ * @tvout: tvout structure
+ * @reg: VIP register to set (HDMI or HDF)
+ * @rnd: rounded val per component
+ */
+static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
+{
+ u32 val = tvout_read(tvout, reg);
+
+ val &= ~(TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT);
+ val |= rnd << TVO_VIP_RND_SHIFT;
+ tvout_write(tvout, val, reg);
+}
+
+/**
+ * Select the VIP input
+ *
+ * @tvout: tvout structure
+ * @reg: VIP register to set (HDMI or HDF)
+ * @main_path: true to select the main path, false for the aux path
+ * @sel_input_logic_inverted: true if the bypass logic is inverted (stih407)
+ * @video_out: selected output data format (RGB or YUV)
+ */
+static void tvout_vip_set_sel_input(struct sti_tvout *tvout, int reg,
+ bool main_path,
+ bool sel_input_logic_inverted,
+ enum sti_tvout_video_out_type video_out)
+{
+ u32 sel_input;
+ u32 val = tvout_read(tvout, reg);
+
+ if (main_path)
+ sel_input = TVO_VIP_SEL_INPUT_MAIN;
+ else
+ sel_input = TVO_VIP_SEL_INPUT_AUX;
+
+ switch (video_out) {
+ case STI_TVOUT_VIDEO_OUT_RGB:
+ sel_input |= TVO_VIP_SEL_INPUT_BYPASSED;
+ break;
+ case STI_TVOUT_VIDEO_OUT_YUV:
+ sel_input &= ~TVO_VIP_SEL_INPUT_BYPASSED;
+ break;
+ }
+
+ /* on the stih407 chip the sel_input bypass mode logic is inverted */
+ if (sel_input_logic_inverted)
+ sel_input = sel_input ^ TVO_VIP_SEL_INPUT_BYPASS_MASK;
+
+ val &= ~TVO_VIP_SEL_INPUT_MASK;
+ val |= sel_input;
+ tvout_write(tvout, val, reg);
+}
+
+/**
+ * Select the input video signed or unsigned
+ *
+ * @tvout: tvout structure
+ * @reg: input video format register to set (main or aux)
+ * @in_vid_fmt: selected video input format
+ */
+static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout, int reg,
+ u32 in_vid_fmt)
+{
+ u32 val = tvout_read(tvout, reg);
+
+ val &= ~TVO_IN_FMT_SIGNED;
+ val |= in_vid_fmt;
+ tvout_write(tvout, val, reg);
+}
+
+/**
+ * Start the VIP block for the HDMI output
+ *
+ * @tvout: pointer to the tvout structure
+ * @main_path: true if the main path has to be used in the vip configuration,
+ * else the aux path is used.
+ */
+static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
+{
+ struct device_node *node = tvout->dev->of_node;
+ bool sel_input_logic_inverted = false;
+
+ dev_dbg(tvout->dev, "%s\n", __func__);
+
+ if (main_path) {
+ DRM_DEBUG_DRIVER("main vip for hdmi\n");
+ /* select the input sync for hdmi = VTG set 1 */
+ tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ } else {
+ DRM_DEBUG_DRIVER("aux vip for hdmi\n");
+ /* select the input sync for hdmi = VTG set 1 */
+ tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ }
+
+ /* set color channel order */
+ tvout_vip_set_color_order(tvout, TVO_VIP_HDMI,
+ TVO_VIP_REORDER_CR_R_SEL,
+ TVO_VIP_REORDER_Y_G_SEL,
+ TVO_VIP_REORDER_CB_B_SEL);
+
+ /* set clipping mode (Limited range RGB/Y) */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI,
+ TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
+
+ /* set round mode (rounded to 8-bit per component) */
+ tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
+
+ if (of_device_is_compatible(node, "st,stih407-tvout")) {
+ /* set input video format */
+ tvout_vip_set_in_vid_fmt(tvout, TVO_MAIN_IN_VID_FORMAT,
+ TVO_IN_FMT_SIGNED);
+ sel_input_logic_inverted = true;
+ }
+
+ /* input selection */
+ tvout_vip_set_sel_input(tvout, TVO_VIP_HDMI, main_path,
+ sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB);
+}
+
+/**
+ * Start the HDF VIP and the HD DAC
+ *
+ * @tvout: pointer to the tvout structure
+ * @main_path: true if the main path has to be used in the vip configuration,
+ * else the aux path is used.
+ */
+static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
+{
+ struct device_node *node = tvout->dev->of_node;
+ bool sel_input_logic_inverted = false;
+
+ dev_dbg(tvout->dev, "%s\n", __func__);
+
+ if (!main_path) {
+ DRM_ERROR("HD Analog on aux not implemented\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("main vip for HDF\n");
+
+ /* set color channel order */
+ tvout_vip_set_color_order(tvout, TVO_VIP_HDF,
+ TVO_VIP_REORDER_CR_R_SEL,
+ TVO_VIP_REORDER_Y_G_SEL,
+ TVO_VIP_REORDER_CB_B_SEL);
+
+ /* set clipping mode (Limited range Cb/Cr) */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF,
+ TVO_VIP_CLIP_LIMITED_RANGE_CB_CR);
+
+ /* set round mode (rounded to 10-bit per component) */
+ tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
+
+ if (of_device_is_compatible(node, "st,stih407-tvout")) {
+ /* set input video format */
+ tvout_vip_set_in_vid_fmt(tvout, TVO_MAIN_IN_VID_FORMAT,
+ TVO_IN_FMT_SIGNED);
+ sel_input_logic_inverted = true;
+ }
+
+ /* Input selection */
+ tvout_vip_set_sel_input(tvout, TVO_VIP_HDF, main_path,
+ sel_input_logic_inverted,
+ STI_TVOUT_VIDEO_OUT_YUV);
+
+ /* select the input sync for HD analog = VTG set 3
+ * and HD DCS = VTG set 2 */
+ tvout_write(tvout,
+ (TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT)
+ | TVO_SYNC_MAIN_VTG_SET_3,
+ TVO_HD_SYNC_SEL);
+
+ /* power up HD DAC */
+ tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF);
+}
+
+static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool sti_tvout_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void sti_tvout_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void sti_tvout_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct sti_tvout_encoder *sti_encoder = to_sti_tvout_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(sti_encoder);
+}
+
+static const struct drm_encoder_funcs sti_tvout_encoder_funcs = {
+ .destroy = sti_tvout_encoder_destroy,
+};
+
+static void sti_hda_encoder_commit(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ tvout_hda_start(tvout, true);
+}
+
+static void sti_hda_encoder_disable(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ /* reset VIP register */
+ tvout_write(tvout, 0x0, TVO_VIP_HDF);
+
+ /* power down HD DAC */
+ tvout_write(tvout, 1, TVO_HD_DAC_CFG_OFF);
+}
+
+static const struct drm_encoder_helper_funcs sti_hda_encoder_helper_funcs = {
+ .dpms = sti_tvout_encoder_dpms,
+ .mode_fixup = sti_tvout_encoder_mode_fixup,
+ .mode_set = sti_tvout_encoder_mode_set,
+ .prepare = sti_tvout_encoder_prepare,
+ .commit = sti_hda_encoder_commit,
+ .disable = sti_hda_encoder_disable,
+};
+
+static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
+ struct sti_tvout *tvout)
+{
+ struct sti_tvout_encoder *encoder;
+ struct drm_encoder *drm_encoder;
+
+ encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+ return NULL;
+
+ encoder->tvout = tvout;
+
+ drm_encoder = (struct drm_encoder *) encoder;
+
+ drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
+ drm_encoder->possible_clones = 1 << 0;
+
+ drm_encoder_init(dev, drm_encoder,
+ &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC);
+
+ drm_encoder_helper_add(drm_encoder, &sti_hda_encoder_helper_funcs);
+
+ return drm_encoder;
+}
+
+static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ tvout_hdmi_start(tvout, true);
+}
+
+static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ /* reset VIP register */
+ tvout_write(tvout, 0x0, TVO_VIP_HDMI);
+}
+
+static const struct drm_encoder_helper_funcs sti_hdmi_encoder_helper_funcs = {
+ .dpms = sti_tvout_encoder_dpms,
+ .mode_fixup = sti_tvout_encoder_mode_fixup,
+ .mode_set = sti_tvout_encoder_mode_set,
+ .prepare = sti_tvout_encoder_prepare,
+ .commit = sti_hdmi_encoder_commit,
+ .disable = sti_hdmi_encoder_disable,
+};
+
+static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
+ struct sti_tvout *tvout)
+{
+ struct sti_tvout_encoder *encoder;
+ struct drm_encoder *drm_encoder;
+
+ encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+ return NULL;
+
+ encoder->tvout = tvout;
+
+ drm_encoder = (struct drm_encoder *) encoder;
+
+ drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
+ drm_encoder->possible_clones = 1 << 1;
+
+ drm_encoder_init(dev, drm_encoder,
+ &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(drm_encoder, &sti_hdmi_encoder_helper_funcs);
+
+ return drm_encoder;
+}
+
+static void sti_tvout_create_encoders(struct drm_device *dev,
+ struct sti_tvout *tvout)
+{
+ tvout->hdmi = sti_tvout_create_hdmi_encoder(dev, tvout);
+ tvout->hda = sti_tvout_create_hda_encoder(dev, tvout);
+}
+
+static void sti_tvout_destroy_encoders(struct sti_tvout *tvout)
+{
+ if (tvout->hdmi)
+ drm_encoder_cleanup(tvout->hdmi);
+ tvout->hdmi = NULL;
+
+ if (tvout->hda)
+ drm_encoder_cleanup(tvout->hda);
+ tvout->hda = NULL;
+}
+
+static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
+{
+ struct sti_tvout *tvout = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ unsigned int i;
+ int ret;
+
+ tvout->drm_dev = drm_dev;
+
+ /* set preformatter matrix */
+ for (i = 0; i < 8; i++) {
+ tvout_write(tvout, rgb_to_ycbcr_601[i],
+ TVO_CSC_MAIN_M0 + (i * 4));
+ tvout_write(tvout, rgb_to_ycbcr_601[i],
+ TVO_CSC_AUX_M0 + (i * 4));
+ }
+
+ sti_tvout_create_encoders(drm_dev, tvout);
+
+ ret = component_bind_all(dev, drm_dev);
+ if (ret)
+ sti_tvout_destroy_encoders(tvout);
+
+ return ret;
+}
+
+static void sti_tvout_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_tvout_ops = {
+ .bind = sti_tvout_bind,
+ .unbind = sti_tvout_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int sti_tvout_master_bind(struct device *dev)
+{
+ return 0;
+}
+
+static void sti_tvout_master_unbind(struct device *dev)
+{
+ /* do nothing */
+}
+
+static const struct component_master_ops sti_tvout_master_ops = {
+ .bind = sti_tvout_master_bind,
+ .unbind = sti_tvout_master_unbind,
+};
+
+static int sti_tvout_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct sti_tvout *tvout;
+ struct resource *res;
+ struct device_node *child_np;
+ struct component_match *match = NULL;
+
+ DRM_INFO("%s\n", __func__);
+
+ if (!node)
+ return -ENODEV;
+
+ tvout = devm_kzalloc(dev, sizeof(*tvout), GFP_KERNEL);
+ if (!tvout)
+ return -ENOMEM;
+
+ tvout->dev = dev;
+
+ /* get memory resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg");
+ if (!res) {
+ DRM_ERROR("Invalid glue resource\n");
+ return -ENOMEM;
+ }
+ tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!tvout->regs)
+ return -ENOMEM;
+
+ /* get reset resources */
+ tvout->reset = devm_reset_control_get(dev, "tvout");
+ /* take tvout out of reset */
+ if (!IS_ERR(tvout->reset))
+ reset_control_deassert(tvout->reset);
+
+ platform_set_drvdata(pdev, tvout);
+
+ of_platform_populate(node, NULL, NULL, dev);
+
+ child_np = of_get_next_available_child(node, NULL);
+
+ while (child_np) {
+ component_match_add(dev, &match, compare_of, child_np);
+ of_node_put(child_np);
+ child_np = of_get_next_available_child(node, child_np);
+ }
+
+ component_master_add_with_match(dev, &sti_tvout_master_ops, match);
+
+ return component_add(dev, &sti_tvout_ops);
+}
+
+static int sti_tvout_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &sti_tvout_master_ops);
+ component_del(&pdev->dev, &sti_tvout_ops);
+ return 0;
+}
+
+static const struct of_device_id tvout_of_match[] = {
+ { .compatible = "st,stih416-tvout", },
+ { .compatible = "st,stih407-tvout", },
+ { /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, tvout_of_match);
+
+struct platform_driver sti_tvout_driver = {
+ .driver = {
+ .name = "sti-tvout",
+ .owner = THIS_MODULE,
+ .of_match_table = tvout_of_match,
+ },
+ .probe = sti_tvout_probe,
+ .remove = sti_tvout_remove,
+};
+
+module_platform_driver(sti_tvout_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
new file mode 100644
index 000000000000..10ced6a479f4
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Fabien Dessenne <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+
+#include "sti_layer.h"
+#include "sti_vid.h"
+#include "sti_vtg.h"
+
+/* Registers */
+#define VID_CTL 0x00
+#define VID_ALP 0x04
+#define VID_CLF 0x08
+#define VID_VPO 0x0C
+#define VID_VPS 0x10
+#define VID_KEY1 0x28
+#define VID_KEY2 0x2C
+#define VID_MPR0 0x30
+#define VID_MPR1 0x34
+#define VID_MPR2 0x38
+#define VID_MPR3 0x3C
+#define VID_MST 0x68
+#define VID_BC 0x70
+#define VID_TINT 0x74
+#define VID_CSAT 0x78
+
+/* Registers values */
+#define VID_CTL_IGNORE (BIT(31) | BIT(30))
+#define VID_CTL_PSI_ENABLE (BIT(2) | BIT(1) | BIT(0))
+#define VID_ALP_OPAQUE 0x00000080
+#define VID_BC_DFLT 0x00008000
+#define VID_TINT_DFLT 0x00000000
+#define VID_CSAT_DFLT 0x00000080
+/* YCbCr to RGB BT709:
+ * R = Y+1.5391Cr
+ * G = Y-0.4590Cr-0.1826Cb
+ * B = Y+1.8125Cb */
+#define VID_MPR0_BT709 0x0A800000
+#define VID_MPR1_BT709 0x0AC50000
+#define VID_MPR2_BT709 0x07150545
+#define VID_MPR3_BT709 0x00000AE8
+
+static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare)
+{
+ u32 val;
+
+ /* Unmask */
+ val = readl(vid->regs + VID_CTL);
+ val &= ~VID_CTL_IGNORE;
+ writel(val, vid->regs + VID_CTL);
+
+ return 0;
+}
+
+static int sti_vid_commit_layer(struct sti_layer *vid)
+{
+ struct drm_display_mode *mode = vid->mode;
+ u32 ydo, xdo, yds, xds;
+
+ ydo = sti_vtg_get_line_number(*mode, vid->dst_y);
+ yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x);
+ xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1);
+
+ writel((ydo << 16) | xdo, vid->regs + VID_VPO);
+ writel((yds << 16) | xds, vid->regs + VID_VPS);
+
+ return 0;
+}
+
+static int sti_vid_disable_layer(struct sti_layer *vid)
+{
+ u32 val;
+
+ /* Mask */
+ val = readl(vid->regs + VID_CTL);
+ val |= VID_CTL_IGNORE;
+ writel(val, vid->regs + VID_CTL);
+
+ return 0;
+}
+
+static const uint32_t *sti_vid_get_formats(struct sti_layer *layer)
+{
+ return NULL;
+}
+
+static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer)
+{
+ return 0;
+}
+
+static void sti_vid_init(struct sti_layer *vid)
+{
+ /* Enable PSI, Mask layer */
+ writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
+
+ /* Opaque */
+ writel(VID_ALP_OPAQUE, vid->regs + VID_ALP);
+
+ /* Color conversion parameters */
+ writel(VID_MPR0_BT709, vid->regs + VID_MPR0);
+ writel(VID_MPR1_BT709, vid->regs + VID_MPR1);
+ writel(VID_MPR2_BT709, vid->regs + VID_MPR2);
+ writel(VID_MPR3_BT709, vid->regs + VID_MPR3);
+
+ /* Brightness, contrast, tint, saturation */
+ writel(VID_BC_DFLT, vid->regs + VID_BC);
+ writel(VID_TINT_DFLT, vid->regs + VID_TINT);
+ writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
+}
+
+static const struct sti_layer_funcs vid_ops = {
+ .get_formats = sti_vid_get_formats,
+ .get_nb_formats = sti_vid_get_nb_formats,
+ .init = sti_vid_init,
+ .prepare = sti_vid_prepare_layer,
+ .commit = sti_vid_commit_layer,
+ .disable = sti_vid_disable_layer,
+};
+
+struct sti_layer *sti_vid_create(struct device *dev)
+{
+ struct sti_layer *vid;
+
+ vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
+ if (!vid) {
+ DRM_ERROR("Failed to allocate memory for VID\n");
+ return NULL;
+ }
+
+ vid->ops = &vid_ops;
+
+ return vid;
+}
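The VID_MPRx_BT709 values above are the hardware's fixed-point encoding of the BT709 matrix quoted in the comment. As a plain-C illustration of the arithmetic itself (a sketch only, not part of the driver; the register encoding is treated as opaque, and 8-bit data with Cb/Cr centred on 128 is assumed):

/* Sketch: BT709 YCbCr -> RGB, matching the equations commented in sti_vid.c.
 * Integer math in 1/10000 steps.
 */
static inline int clamp_u8(int v)
{
	return v < 0 ? 0 : (v > 255 ? 255 : v);
}

static void ycbcr_to_rgb_bt709(int y, int cb, int cr, int *r, int *g, int *b)
{
	cb -= 128;
	cr -= 128;
	*r = clamp_u8(y + (15391 * cr) / 10000);
	*g = clamp_u8(y - (4590 * cr + 1826 * cb) / 10000);
	*b = clamp_u8(y + (18125 * cb) / 10000);
}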
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
new file mode 100644
index 000000000000..2c0aecd63294
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Fabien Dessenne <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_VID_H_
+#define _STI_VID_H_
+
+struct device;
+struct sti_layer;
+
+struct sti_layer *sti_vid_create(struct device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_vtac.c b/drivers/gpu/drm/sti/sti_vtac.c
new file mode 100644
index 000000000000..82a51d488434
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vtac.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drmP.h>
+
+/* register offsets */
+#define VTAC_CONFIG 0x00
+#define VTAC_RX_FIFO_CONFIG 0x04
+#define VTAC_FIFO_CONFIG_VAL 0x04
+
+#define VTAC_SYS_CFG8521 0x824
+#define VTAC_SYS_CFG8522 0x828
+
+/* Number of phyts per pixel */
+#define VTAC_2_5_PPP 0x0005
+#define VTAC_3_PPP 0x0006
+#define VTAC_4_PPP 0x0008
+#define VTAC_5_PPP 0x000A
+#define VTAC_6_PPP 0x000C
+#define VTAC_13_PPP 0x001A
+#define VTAC_14_PPP 0x001C
+#define VTAC_15_PPP 0x001E
+#define VTAC_16_PPP 0x0020
+#define VTAC_17_PPP 0x0022
+#define VTAC_18_PPP 0x0024
+
+/* enable bits */
+#define VTAC_ENABLE 0x3003
+
+#define VTAC_TX_PHY_ENABLE_CLK_PHY BIT(0)
+#define VTAC_TX_PHY_ENABLE_CLK_DLL BIT(1)
+#define VTAC_TX_PHY_PLL_NOT_OSC_MODE BIT(3)
+#define VTAC_TX_PHY_RST_N_DLL_SWITCH BIT(4)
+#define VTAC_TX_PHY_PROG_N3 BIT(9)
+
+/**
+ * VTAC mode structure
+ *
+ * @vid_in_width: Video Data Resolution
+ * @phyts_width: Width of the phyt buses (phyt low and phyt high).
+ * @phyts_per_pixel: Number of phyts sent per pixel
+ */
+struct sti_vtac_mode {
+ u32 vid_in_width;
+ u32 phyts_width;
+ u32 phyts_per_pixel;
+};
+
+static const struct sti_vtac_mode vtac_mode_main = {0x2, 0x2, VTAC_5_PPP};
+static const struct sti_vtac_mode vtac_mode_aux = {0x1, 0x0, VTAC_17_PPP};
+
+/**
+ * VTAC structure
+ *
+ * @dev: pointer to device structure
+ * @regs: ioremapped registers for RX and TX devices
+ * @phy_regs: phy registers for TX device
+ * @clk: clock
+ * @mode: main or auxiliary configuration mode
+ */
+struct sti_vtac {
+ struct device *dev;
+ void __iomem *regs;
+ void __iomem *phy_regs;
+ struct clk *clk;
+ const struct sti_vtac_mode *mode;
+};
+
+static void sti_vtac_rx_set_config(struct sti_vtac *vtac)
+{
+ u32 config;
+
+ /* Enable VTAC clock */
+ if (clk_prepare_enable(vtac->clk))
+ DRM_ERROR("Failed to prepare/enable vtac_rx clock.\n");
+
+ writel(VTAC_FIFO_CONFIG_VAL, vtac->regs + VTAC_RX_FIFO_CONFIG);
+
+ config = VTAC_ENABLE;
+ config |= vtac->mode->vid_in_width << 4;
+ config |= vtac->mode->phyts_width << 16;
+ config |= vtac->mode->phyts_per_pixel << 23;
+ writel(config, vtac->regs + VTAC_CONFIG);
+}
+
+static void sti_vtac_tx_set_config(struct sti_vtac *vtac)
+{
+ u32 phy_config;
+ u32 config;
+
+ /* Enable VTAC clock */
+ if (clk_prepare_enable(vtac->clk))
+ DRM_ERROR("Failed to prepare/enable vtac_tx clock.\n");
+
+ /* Configure vtac phy */
+ phy_config = 0x00000000;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8522);
+ phy_config = VTAC_TX_PHY_ENABLE_CLK_PHY;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config |= VTAC_TX_PHY_PROG_N3;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config |= VTAC_TX_PHY_ENABLE_CLK_DLL;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config |= VTAC_TX_PHY_RST_N_DLL_SWITCH;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config |= VTAC_TX_PHY_PLL_NOT_OSC_MODE;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+
+ /* Configure vtac tx */
+ config = VTAC_ENABLE;
+ config |= vtac->mode->vid_in_width << 4;
+ config |= vtac->mode->phyts_width << 16;
+ config |= vtac->mode->phyts_per_pixel << 23;
+ writel(config, vtac->regs + VTAC_CONFIG);
+}
+
+static const struct of_device_id vtac_of_match[] = {
+ {
+ .compatible = "st,vtac-main",
+ .data = &vtac_mode_main,
+ }, {
+ .compatible = "st,vtac-aux",
+ .data = &vtac_mode_aux,
+ }, {
+ /* end node */
+ }
+};
+MODULE_DEVICE_TABLE(of, vtac_of_match);
+
+static int sti_vtac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct of_device_id *id;
+ struct sti_vtac *vtac;
+ struct resource *res;
+
+ vtac = devm_kzalloc(dev, sizeof(*vtac), GFP_KERNEL);
+ if (!vtac)
+ return -ENOMEM;
+
+ vtac->dev = dev;
+
+ id = of_match_node(vtac_of_match, np);
+ if (!id)
+ return -ENODEV;
+
+ vtac->mode = id->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DRM_ERROR("Invalid resource\n");
+ return -ENXIO;
+ }
+ vtac->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vtac->regs))
+ return PTR_ERR(vtac->regs);
+
+ vtac->clk = devm_clk_get(dev, "vtac");
+ if (IS_ERR(vtac->clk)) {
+ DRM_ERROR("Cannot get vtac clock\n");
+ return PTR_ERR(vtac->clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ vtac->phy_regs = devm_ioremap_nocache(dev, res->start,
+ resource_size(res));
+ if (!vtac->phy_regs)
+ return -ENOMEM;
+ sti_vtac_tx_set_config(vtac);
+ } else {
+ sti_vtac_rx_set_config(vtac);
+ }
+
+ platform_set_drvdata(pdev, vtac);
+ DRM_INFO("%s %s\n", __func__, dev_name(vtac->dev));
+
+ return 0;
+}
+
+static int sti_vtac_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver sti_vtac_driver = {
+ .driver = {
+ .name = "sti-vtac",
+ .owner = THIS_MODULE,
+ .of_match_table = vtac_of_match,
+ },
+ .probe = sti_vtac_probe,
+ .remove = sti_vtac_remove,
+};
+
+module_platform_driver(sti_vtac_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
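sti_vtac_rx_set_config() and sti_vtac_tx_set_config() assemble the same VTAC_CONFIG word: video input width at bit 4, phyt bus width at bit 16, phyts per pixel at bit 23 (the *_PPP defines appear to carry the count in half-phyt units, e.g. VTAC_2_5_PPP = 0x5 and VTAC_17_PPP = 0x22). A possible cleanup, sketched here on the assumption that the field layout is exactly the one used above, would share the packing:

/* Hypothetical helper, not part of the patch: build the VTAC_CONFIG word. */
static u32 sti_vtac_config_word(const struct sti_vtac_mode *mode)
{
	u32 config = VTAC_ENABLE;

	config |= mode->vid_in_width << 4;
	config |= mode->phyts_width << 16;
	config |= mode->phyts_per_pixel << 23;

	return config;
}

Both set_config paths would then end with writel(sti_vtac_config_word(vtac->mode), vtac->regs + VTAC_CONFIG);.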
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
new file mode 100644
index 000000000000..740d6e347a62
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <[email protected]>
+ * Fabien Dessenne <[email protected]>
+ * Vincent Abriou <[email protected]>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+
+#include <drm/drmP.h>
+
+#include "sti_vtg.h"
+
+#define VTG_TYPE_MASTER 0
+#define VTG_TYPE_SLAVE_BY_EXT0 1
+
+/* register offsets */
+#define VTG_MODE 0x0000
+#define VTG_CLKLN 0x0008
+#define VTG_HLFLN 0x000C
+#define VTG_DRST_AUTOC 0x0010
+#define VTG_VID_TFO 0x0040
+#define VTG_VID_TFS 0x0044
+#define VTG_VID_BFO 0x0048
+#define VTG_VID_BFS 0x004C
+
+#define VTG_HOST_ITS 0x0078
+#define VTG_HOST_ITS_BCLR 0x007C
+#define VTG_HOST_ITM_BCLR 0x0088
+#define VTG_HOST_ITM_BSET 0x008C
+
+#define VTG_H_HD_1 0x00C0
+#define VTG_TOP_V_VD_1 0x00C4
+#define VTG_BOT_V_VD_1 0x00C8
+#define VTG_TOP_V_HD_1 0x00CC
+#define VTG_BOT_V_HD_1 0x00D0
+
+#define VTG_H_HD_2 0x00E0
+#define VTG_TOP_V_VD_2 0x00E4
+#define VTG_BOT_V_VD_2 0x00E8
+#define VTG_TOP_V_HD_2 0x00EC
+#define VTG_BOT_V_HD_2 0x00F0
+
+#define VTG_H_HD_3 0x0100
+#define VTG_TOP_V_VD_3 0x0104
+#define VTG_BOT_V_VD_3 0x0108
+#define VTG_TOP_V_HD_3 0x010C
+#define VTG_BOT_V_HD_3 0x0110
+
+#define VTG_IRQ_BOTTOM BIT(0)
+#define VTG_IRQ_TOP BIT(1)
+#define VTG_IRQ_MASK (VTG_IRQ_TOP | VTG_IRQ_BOTTOM)
+
+/* delay introduced by the Arbitrary Waveform Generator, in number of pixels */
+#define AWG_DELAY_HD (-9)
+#define AWG_DELAY_ED (-8)
+#define AWG_DELAY_SD (-7)
+
+static LIST_HEAD(vtg_lookup);
+
+/**
+ * STI VTG structure
+ *
+ * @dev: pointer to the device structure
+ * @np: device node
+ * @regs: VTG registers
+ * @irq: VTG irq
+ * @irq_status: store the IRQ status value
+ * @notifier_list: notifier callback
+ * @crtc_id: the crtc id for vblank event
+ * @slave: slave vtg
+ * @link: List node to link the structure in lookup list
+ */
+struct sti_vtg {
+ struct device *dev;
+ struct device_node *np;
+ void __iomem *regs;
+ int irq;
+ u32 irq_status;
+ struct raw_notifier_head notifier_list;
+ int crtc_id;
+ struct sti_vtg *slave;
+ struct list_head link;
+};
+
+static void vtg_register(struct sti_vtg *vtg)
+{
+ list_add_tail(&vtg->link, &vtg_lookup);
+}
+
+struct sti_vtg *of_vtg_find(struct device_node *np)
+{
+ struct sti_vtg *vtg;
+
+ list_for_each_entry(vtg, &vtg_lookup, link) {
+ if (vtg->np == np)
+ return vtg;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_vtg_find);
+
+static void vtg_reset(struct sti_vtg *vtg)
+{
+ /* reset slave and then master */
+ if (vtg->slave)
+ vtg_reset(vtg->slave);
+
+ writel(1, vtg->regs + VTG_DRST_AUTOC);
+}
+
+static void vtg_set_mode(struct sti_vtg *vtg,
+ int type, const struct drm_display_mode *mode)
+{
+ u32 tmp;
+
+ if (vtg->slave)
+ vtg_set_mode(vtg->slave, VTG_TYPE_SLAVE_BY_EXT0, mode);
+
+ writel(mode->htotal, vtg->regs + VTG_CLKLN);
+ writel(mode->vtotal * 2, vtg->regs + VTG_HLFLN);
+
+ tmp = (mode->vtotal - mode->vsync_start + 1) << 16;
+ tmp |= mode->htotal - mode->hsync_start;
+ writel(tmp, vtg->regs + VTG_VID_TFO);
+ writel(tmp, vtg->regs + VTG_VID_BFO);
+
+ tmp = (mode->vdisplay + mode->vtotal - mode->vsync_start + 1) << 16;
+ tmp |= mode->hdisplay + mode->htotal - mode->hsync_start;
+ writel(tmp, vtg->regs + VTG_VID_TFS);
+ writel(tmp, vtg->regs + VTG_VID_BFS);
+
+ /* prepare VTG set 1 and 2 for HDMI and VTG set 3 for HD DAC */
+ tmp = (mode->hsync_end - mode->hsync_start) << 16;
+ writel(tmp, vtg->regs + VTG_H_HD_1);
+ writel(tmp, vtg->regs + VTG_H_HD_2);
+
+ tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
+ tmp |= 1;
+ writel(tmp, vtg->regs + VTG_TOP_V_VD_1);
+ writel(tmp, vtg->regs + VTG_BOT_V_VD_1);
+ writel(0, vtg->regs + VTG_TOP_V_HD_1);
+ writel(0, vtg->regs + VTG_BOT_V_HD_1);
+
+ /* prepare VTG set 2 for HD DCS */
+ writel(tmp, vtg->regs + VTG_TOP_V_VD_2);
+ writel(tmp, vtg->regs + VTG_BOT_V_VD_2);
+ writel(0, vtg->regs + VTG_TOP_V_HD_2);
+ writel(0, vtg->regs + VTG_BOT_V_HD_2);
+
+ /* prepare VTG set 3 for HD Analog in HD mode */
+ tmp = (mode->hsync_end - mode->hsync_start + AWG_DELAY_HD) << 16;
+ tmp |= mode->htotal + AWG_DELAY_HD;
+ writel(tmp, vtg->regs + VTG_H_HD_3);
+
+ tmp = (mode->vsync_end - mode->vsync_start) << 16;
+ tmp |= mode->vtotal;
+ writel(tmp, vtg->regs + VTG_TOP_V_VD_3);
+ writel(tmp, vtg->regs + VTG_BOT_V_VD_3);
+
+ tmp = (mode->htotal + AWG_DELAY_HD) << 16;
+ tmp |= mode->htotal + AWG_DELAY_HD;
+ writel(tmp, vtg->regs + VTG_TOP_V_HD_3);
+ writel(tmp, vtg->regs + VTG_BOT_V_HD_3);
+
+ /* mode */
+ writel(type, vtg->regs + VTG_MODE);
+}
+
+static void vtg_enable_irq(struct sti_vtg *vtg)
+{
+ /* clear interrupt status and mask */
+ writel(0xFFFF, vtg->regs + VTG_HOST_ITS_BCLR);
+ writel(0xFFFF, vtg->regs + VTG_HOST_ITM_BCLR);
+ writel(VTG_IRQ_MASK, vtg->regs + VTG_HOST_ITM_BSET);
+}
+
+void sti_vtg_set_config(struct sti_vtg *vtg,
+ const struct drm_display_mode *mode)
+{
+ /* write configuration */
+ vtg_set_mode(vtg, VTG_TYPE_MASTER, mode);
+
+ vtg_reset(vtg);
+
+ /* enable irq for the vtg vblank synchro */
+ if (vtg->slave)
+ vtg_enable_irq(vtg->slave);
+ else
+ vtg_enable_irq(vtg);
+}
+EXPORT_SYMBOL(sti_vtg_set_config);
+
+/**
+ * sti_vtg_get_line_number
+ *
+ * @mode: display mode to be used
+ * @y: line
+ *
+ * Return the line number according to the display mode taking
+ * into account the Sync and Back Porch information.
+ * Video frame line numbers start at 1, y starts at 0.
+ * In interlaced modes the start line is the field line number of the odd
+ * field, but y is still defined as a progressive frame.
+ */
+u32 sti_vtg_get_line_number(struct drm_display_mode mode, int y)
+{
+ u32 start_line = mode.vtotal - mode.vsync_start + 1;
+
+ if (mode.flags & DRM_MODE_FLAG_INTERLACE)
+ start_line *= 2;
+
+ return start_line + y;
+}
+EXPORT_SYMBOL(sti_vtg_get_line_number);
+
+/**
+ * sti_vtg_get_pixel_number
+ *
+ * @mode: display mode to be used
+ * @x: horizontal position (column)
+ *
+ * Return the pixel number according to the display mode taking
+ * into account the Sync and Back Porch information.
+ * Pixels are counted from 0.
+ */
+u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x)
+{
+ return mode.htotal - mode.hsync_start + x;
+}
+EXPORT_SYMBOL(sti_vtg_get_pixel_number);
+
+int sti_vtg_register_client(struct sti_vtg *vtg,
+ struct notifier_block *nb, int crtc_id)
+{
+ if (vtg->slave)
+ return sti_vtg_register_client(vtg->slave, nb, crtc_id);
+
+ vtg->crtc_id = crtc_id;
+ return raw_notifier_chain_register(&vtg->notifier_list, nb);
+}
+EXPORT_SYMBOL(sti_vtg_register_client);
+
+int sti_vtg_unregister_client(struct sti_vtg *vtg, struct notifier_block *nb)
+{
+ if (vtg->slave)
+ return sti_vtg_unregister_client(vtg->slave, nb);
+
+ return raw_notifier_chain_unregister(&vtg->notifier_list, nb);
+}
+EXPORT_SYMBOL(sti_vtg_unregister_client);
+
+static irqreturn_t vtg_irq_thread(int irq, void *arg)
+{
+ struct sti_vtg *vtg = arg;
+ u32 event;
+
+ event = (vtg->irq_status & VTG_IRQ_TOP) ?
+ VTG_TOP_FIELD_EVENT : VTG_BOTTOM_FIELD_EVENT;
+
+ raw_notifier_call_chain(&vtg->notifier_list, event, &vtg->crtc_id);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vtg_irq(int irq, void *arg)
+{
+ struct sti_vtg *vtg = arg;
+
+ vtg->irq_status = readl(vtg->regs + VTG_HOST_ITS);
+
+ writel(vtg->irq_status, vtg->regs + VTG_HOST_ITS_BCLR);
+
+ /* force sync bus write */
+ readl(vtg->regs + VTG_HOST_ITS);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int vtg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct sti_vtg *vtg;
+ struct resource *res;
+ char *irq_name;
+ int ret;
+
+ vtg = devm_kzalloc(dev, sizeof(*vtg), GFP_KERNEL);
+ if (!vtg)
+ return -ENOMEM;
+
+ vtg->dev = dev;
+ vtg->np = pdev->dev.of_node;
+
+ /* Get memory resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DRM_ERROR("Get memory resource failed\n");
+ return -ENXIO;
+ }
+ vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!vtg->regs)
+ return -ENOMEM;
+
+ np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
+ if (np) {
+ vtg->slave = of_vtg_find(np);
+
+ if (!vtg->slave)
+ return -EPROBE_DEFER;
+ } else {
+ vtg->irq = platform_get_irq(pdev, 0);
+ if (vtg->irq < 0) {
+ DRM_ERROR("Failed to get VTG interrupt\n");
+ return vtg->irq;
+ }
+
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "vsync-%s",
+ dev_name(vtg->dev));
+ if (!irq_name)
+ return -ENOMEM;
+
+ RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list);
+
+ ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
+ vtg_irq_thread, IRQF_ONESHOT, irq_name, vtg);
+ if (ret < 0) {
+ DRM_ERROR("Failed to register VTG interrupt\n");
+ return ret;
+ }
+ }
+
+ vtg_register(vtg);
+ platform_set_drvdata(pdev, vtg);
+
+ DRM_INFO("%s %s\n", __func__, dev_name(vtg->dev));
+
+ return 0;
+}
+
+static int vtg_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id vtg_of_match[] = {
+ { .compatible = "st,vtg", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vtg_of_match);
+
+struct platform_driver sti_vtg_driver = {
+ .driver = {
+ .name = "sti-vtg",
+ .owner = THIS_MODULE,
+ .of_match_table = vtg_of_match,
+ },
+ .probe = vtg_probe,
+ .remove = vtg_remove,
+};
+
+module_platform_driver(sti_vtg_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
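As a sanity check of the two lookup helpers, take standard CEA-861 1080p60 timings, assumed here purely for illustration (htotal = 2200, hsync_start = 2008, vtotal = 1125, vsync_start = 1084):

/* Assumed 1080p60 timings, progressive mode. */
u32 line  = sti_vtg_get_line_number(mode, 0);  /* 1125 - 1084 + 1 = 42  */
u32 pixel = sti_vtg_get_pixel_number(mode, 0); /* 2200 - 2008 + 0 = 192 */

That is, the first active line of the frame is line 42 and the first active pixel is pixel 192, consistent with SMPTE blanking. Note that both helpers take struct drm_display_mode by value; a const pointer would be the more usual kernel idiom and would avoid copying a sizeable structure per call, at the cost of also touching the callers in sti_vid.c.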
diff --git a/drivers/gpu/drm/sti/sti_vtg.h b/drivers/gpu/drm/sti/sti_vtg.h
new file mode 100644
index 000000000000..e84d23f1f57f
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vtg.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <[email protected]> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_VTG_H_
+#define _STI_VTG_H_
+
+#define VTG_TOP_FIELD_EVENT 1
+#define VTG_BOTTOM_FIELD_EVENT 2
+
+struct sti_vtg;
+struct drm_display_mode;
+struct notifier_block;
+struct device_node;
+
+struct sti_vtg *of_vtg_find(struct device_node *np);
+void sti_vtg_set_config(struct sti_vtg *vtg,
+ const struct drm_display_mode *mode);
+int sti_vtg_register_client(struct sti_vtg *vtg,
+ struct notifier_block *nb, int crtc_id);
+int sti_vtg_unregister_client(struct sti_vtg *vtg,
+ struct notifier_block *nb);
+
+u32 sti_vtg_get_line_number(struct drm_display_mode mode, int y);
+u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 446837e955b6..0c67d7eebc94 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -140,7 +140,9 @@ static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
if (mode != DRM_MODE_DPMS_ON) {
drm_panel_disable(panel);
tegra_output_disable(output);
+ drm_panel_unprepare(panel);
} else {
+ drm_panel_prepare(panel);
tegra_output_enable(output);
drm_panel_enable(panel);
}
diff --git a/drivers/gpu/drm/tilcdc/Module.symvers b/drivers/gpu/drm/tilcdc/Module.symvers
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/gpu/drm/tilcdc/Module.symvers
+++ /dev/null
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 8ff72c8ad06b..4c7aa1d8134f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -151,7 +151,7 @@ struct panel_connector {
static void panel_connector_destroy(struct drm_connector *connector)
{
struct panel_connector *panel_connector = to_panel_connector(connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(panel_connector);
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index f02cb7c02f7f..3775fd49dac4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -166,7 +166,7 @@ struct slave_connector {
static void slave_connector_destroy(struct drm_connector *connector)
{
struct slave_connector *slave_connector = to_slave_connector(connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(slave_connector);
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 82fb5204565f..354c47ca6374 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -167,7 +167,7 @@ struct tfp410_connector {
static void tfp410_connector_destroy(struct drm_connector *connector)
{
struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(tfp410_connector);
}
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index d7f92fe9d904..66fc6395eb54 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -35,7 +35,7 @@
#include <drm/drm_sysfs.h>
static DECLARE_WAIT_QUEUE_HEAD(exit_q);
-atomic_t device_released;
+static atomic_t device_released;
static struct device_type ttm_drm_class_type = {
.name = "ttm",
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 863bef9f9234..09874d695188 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
*
* @pool: to free the pages from
* @free_all: If set to true will free all pages in pool
+ * @gfp: GFP flags.
**/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+ gfp_t gfp)
{
unsigned long irq_flags;
struct page *p;
@@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
if (!pages_to_free) {
pr_err("Failed to allocate memory for pool free operation\n");
return 0;
@@ -382,32 +383,35 @@ out:
*
* XXX: (dchinner) Deadlock warning!
*
- * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means
- * this can deadlock when called a sc->gfp_mask that is not equal to
- * GFP_KERNEL.
+ * We need to pass sc->gfp_mask to ttm_page_pool_free().
*
* This code is crying out for a shrinker per pool....
*/
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- static atomic_t start_pool = ATOMIC_INIT(0);
+ static DEFINE_MUTEX(lock);
+ static unsigned start_pool;
unsigned i;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned pool_offset;
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
unsigned long freed = 0;
- pool_offset = pool_offset % NUM_POOLS;
+ if (!mutex_trylock(&lock))
+ return SHRINK_STOP;
+ pool_offset = ++start_pool % NUM_POOLS;
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
if (shrink_pages == 0)
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
- shrink_pages = ttm_page_pool_free(pool, nr_free);
+ shrink_pages = ttm_page_pool_free(pool, nr_free,
+ sc->gfp_mask);
freed += nr_free - shrink_pages;
}
+ mutex_unlock(&lock);
return freed;
}
@@ -706,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (npages)
- ttm_page_pool_free(pool, npages);
+ ttm_page_pool_free(pool, npages, GFP_KERNEL);
}
/*
@@ -790,7 +794,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
return 0;
}
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
char *name)
{
spin_lock_init(&pool->lock);
@@ -846,7 +850,8 @@ void ttm_page_alloc_fini(void)
ttm_pool_mm_shrink_fini(_manager);
for (i = 0; i < NUM_POOLS; ++i)
- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
+ GFP_KERNEL);
kobject_put(&_manager->kobj);
_manager = NULL;
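The shrink_scan rework above swaps the atomic round-robin counter for one guarded by mutex_trylock(), returning SHRINK_STOP on contention; since ttm_page_pool_free() now allocates its scratch array with sc->gfp_mask, this also keeps the shrinker from re-entering itself out of that allocation. A minimal sketch of the pattern (all names hypothetical):

static unsigned long example_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	static DEFINE_MUTEX(lock);
	unsigned long freed = 0;

	/* Never sleep in reclaim: contention means someone is already
	 * shrinking, so report "stop" rather than waiting. */
	if (!mutex_trylock(&lock))
		return SHRINK_STOP;

	/* ... free up to sc->nr_to_scan objects, doing any temporary
	 * allocation with sc->gfp_mask rather than GFP_KERNEL ... */

	mutex_unlock(&lock);
	return freed;
}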
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index fb8259f69839..ca65df144765 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -411,8 +411,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
*
* @pool: to free the pages from
* @nr_free: If set to true will free all pages in pool
+ * @gfp: GFP flags.
**/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+ gfp_t gfp)
{
unsigned long irq_flags;
struct dma_page *dma_p, *tmp;
@@ -430,8 +432,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
npages_to_free, nr_free);
}
#endif
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
if (!pages_to_free) {
pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -530,7 +531,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
if (pool->type != type)
continue;
/* Takes a spinlock.. */
- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
/* This code path is called after _all_ references to the
* struct device has been dropped - so nobody should be
@@ -983,7 +984,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
/* shrink pool if necessary (only on !is_cached pools)*/
if (npages)
- ttm_dma_page_pool_free(pool, npages);
+ ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -993,10 +994,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
*
* XXX: (dchinner) Deadlock warning!
*
- * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
- * needs to be paid to sc->gfp_mask to determine if this can be done or not.
- * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
- * bad.
+ * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
*
* I'm getting sadder as I hear more pathetical whimpers about needing per-pool
* shrinkers
@@ -1004,9 +1002,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- static atomic_t start_pool = ATOMIC_INIT(0);
+ static unsigned start_pool;
unsigned idx = 0;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned pool_offset;
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
unsigned long freed = 0;
@@ -1014,8 +1012,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (list_empty(&_manager->pools))
return SHRINK_STOP;
- mutex_lock(&_manager->lock);
- pool_offset = pool_offset % _manager->npools;
+ if (!mutex_trylock(&_manager->lock))
+ return SHRINK_STOP;
+ if (!_manager->npools)
+ goto out;
+ pool_offset = ++start_pool % _manager->npools;
list_for_each_entry(p, &_manager->pools, pools) {
unsigned nr_free;
@@ -1027,13 +1028,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (++idx < pool_offset)
continue;
nr_free = shrink_pages;
- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
+ sc->gfp_mask);
freed += nr_free - shrink_pages;
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid,
nr_free, shrink_pages);
}
+out:
mutex_unlock(&_manager->lock);
return freed;
}
@@ -1044,7 +1047,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
struct device_pools *p;
unsigned long count = 0;
- mutex_lock(&_manager->lock);
+ if (!mutex_trylock(&_manager->lock))
+ return 0;
list_for_each_entry(p, &_manager->pools, pools)
count += p->pool->npages_free;
mutex_unlock(&_manager->lock);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index dea38ab6c47d..e026a9e2942a 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -105,14 +105,7 @@ static struct drm_encoder*
udl_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
- obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
+ return drm_encoder_find(connector->dev, enc_id);
}
static int udl_connector_set_property(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 7094b92d1ec7..42795674bc07 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -306,10 +306,23 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
DRM_DEBUG("\n");
ret = udl_modeset_init(dev);
+ if (ret)
+ goto err;
ret = udl_fbdev_init(dev);
+ if (ret)
+ goto err;
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ goto err_fb;
+
return 0;
+err_fb:
+ udl_fbdev_cleanup(dev);
err:
+ if (udl->urbs.count)
+ udl_free_urb_list(dev);
kfree(udl);
DRM_ERROR("%d\n", ret);
return ret;
@@ -325,6 +338,8 @@ int udl_driver_unload(struct drm_device *dev)
{
struct udl_device *udl = dev->dev_private;
+ drm_vblank_cleanup(dev);
+
if (udl->urbs.count)
udl_free_urb_list(dev);
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index cddc4fcf35cf..dc145d320b25 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -363,6 +363,26 @@ static void udl_crtc_destroy(struct drm_crtc *crtc)
kfree(crtc);
}
+static int udl_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct udl_framebuffer *ufb = to_udl_fb(fb);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (event)
+ drm_send_vblank_event(dev, 0, event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ crtc->primary->fb = fb;
+
+ return 0;
+}
+
static void udl_crtc_prepare(struct drm_crtc *crtc)
{
}
@@ -384,6 +404,7 @@ static struct drm_crtc_helper_funcs udl_helper_funcs = {
static const struct drm_crtc_funcs udl_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.destroy = udl_crtc_destroy,
+ .page_flip = udl_crtc_page_flip,
};
static int udl_crtc_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 70ddce8358b0..ed1d51006ab1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -61,7 +61,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err;
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err;
@@ -212,7 +212,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index f31a75494e07..63c4d6f0281e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -316,7 +316,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(bo, false, true, false, 0);
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
BUG_ON(ret != 0);
ret = ttm_bo_kmap(bo, 0, 1, &map);
@@ -990,7 +990,7 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
if (unlikely(ret != 0))
return ERR_PTR(-ERESTARTSYS);
- if (file_priv->is_master) {
+ if (drm_is_master(file_priv)) {
mutex_unlock(&dev->master_mutex);
return NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 1a024e3b7285..d2bc2b03d4c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -136,7 +136,7 @@ int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+ ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
@@ -343,7 +343,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
kmap_num = (64*64*4) >> PAGE_SHIFT;
- ret = ttm_bo_reserve(bo, true, false, false, 0);
+ ret = ttm_bo_reserve(bo, true, false, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return;
@@ -1501,7 +1501,6 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
{
struct drm_vmw_cursor_bypass_arg *arg = data;
struct vmw_display_unit *du;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
int ret = 0;
@@ -1519,13 +1518,12 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
return 0;
}
- obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, arg->crtc_id);
+ if (!crtc) {
ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
du = vmw_crtc_to_du(crtc);
du->hotspot_x = arg->xhot;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 01d68f0a69dc..a432c0db257c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -127,7 +127,7 @@ static void vmw_resource_release(struct kref *kref)
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
- ttm_bo_reserve(bo, false, false, false, 0);
+ ttm_bo_reserve(bo, false, false, false, NULL);
if (!list_empty(&res->mob_head) &&
res->func->unbind != NULL) {
struct ttm_validate_buffer val_buf;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 800c8b60f7a2..5e79c6ad914f 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -810,7 +810,7 @@ config HID_ZYDACRON
config HID_SENSOR_HUB
tristate "HID Sensors framework support"
- depends on HID
+ depends on HID && HAS_IOMEM
select MFD_CORE
default n
---help---
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 6d00bb9366fa..48b66bbffc94 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -323,6 +323,7 @@
#define USB_VENDOR_ID_ETURBOTOUCH 0x22b9
#define USB_DEVICE_ID_ETURBOTOUCH 0x0006
+#define USB_DEVICE_ID_ETURBOTOUCH_2968 0x2968
#define USB_VENDOR_ID_EZKEY 0x0518
#define USB_DEVICE_ID_BTC_8193 0x0002
@@ -715,6 +716,8 @@
#define USB_VENDOR_ID_PENMOUNT 0x14e1
#define USB_DEVICE_ID_PENMOUNT_PCI 0x3500
+#define USB_DEVICE_ID_PENMOUNT_1610 0x1610
+#define USB_DEVICE_ID_PENMOUNT_1640 0x1640
#define USB_VENDOR_ID_PETALYNX 0x18b1
#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 2451c7e5febd..578bbe65902b 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -428,6 +428,7 @@ static int rmi_raw_event(struct hid_device *hdev,
return 0;
}
+#ifdef CONFIG_PM
static int rmi_post_reset(struct hid_device *hdev)
{
return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
@@ -437,6 +438,7 @@ static int rmi_post_resume(struct hid_device *hdev)
{
return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
}
+#endif /* CONFIG_PM */
#define RMI4_MAX_PAGE 0xff
#define RMI4_PAGE_SIZE 0x0100
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index a8d5c8faf8cf..e244e449cbba 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -159,17 +159,18 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
{
struct hid_sensor_hub_callbacks_list *callback;
struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev);
+ unsigned long flags;
- spin_lock(&pdata->dyn_callback_lock);
+ spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
list_for_each_entry(callback, &pdata->dyn_callback_list, list)
if (callback->usage_id == usage_id &&
callback->hsdev == hsdev) {
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
return -EINVAL;
}
callback = kzalloc(sizeof(*callback), GFP_ATOMIC);
if (!callback) {
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
return -ENOMEM;
}
callback->hsdev = hsdev;
@@ -177,7 +178,7 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
callback->usage_id = usage_id;
callback->priv = NULL;
list_add_tail(&callback->list, &pdata->dyn_callback_list);
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
return 0;
}
@@ -188,8 +189,9 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
{
struct hid_sensor_hub_callbacks_list *callback;
struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev);
+ unsigned long flags;
- spin_lock(&pdata->dyn_callback_lock);
+ spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
list_for_each_entry(callback, &pdata->dyn_callback_list, list)
if (callback->usage_id == usage_id &&
callback->hsdev == hsdev) {
@@ -197,7 +199,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
kfree(callback);
break;
}
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
return 0;
}
@@ -378,15 +380,16 @@ static int sensor_hub_suspend(struct hid_device *hdev, pm_message_t message)
{
struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
struct hid_sensor_hub_callbacks_list *callback;
+ unsigned long flags;
hid_dbg(hdev, " sensor_hub_suspend\n");
- spin_lock(&pdata->dyn_callback_lock);
+ spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
if (callback->usage_callback->suspend)
callback->usage_callback->suspend(
callback->hsdev, callback->priv);
}
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
return 0;
}
@@ -395,15 +398,16 @@ static int sensor_hub_resume(struct hid_device *hdev)
{
struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
struct hid_sensor_hub_callbacks_list *callback;
+ unsigned long flags;
hid_dbg(hdev, " sensor_hub_resume\n");
- spin_lock(&pdata->dyn_callback_lock);
+ spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
if (callback->usage_callback->resume)
callback->usage_callback->resume(
callback->hsdev, callback->priv);
}
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
return 0;
}
@@ -632,6 +636,7 @@ static int sensor_hub_probe(struct hid_device *hdev,
if (name == NULL) {
hid_err(hdev, "Failed MFD device name\n");
ret = -ENOMEM;
+ kfree(hsdev);
goto err_no_mem;
}
sd->hid_sensor_hub_client_devs[
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 59badc10a08c..31e6727cd009 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -49,6 +49,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
@@ -76,6 +77,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index e84f4526eb36..ae22e3c1fc4c 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -339,9 +339,13 @@ static void process_chn_event(u32 relid)
*/
do {
- hv_begin_read(&channel->inbound);
+ if (read_state)
+ hv_begin_read(&channel->inbound);
channel->onchannel_callback(arg);
- bytes_to_read = hv_end_read(&channel->inbound);
+ if (read_state)
+ bytes_to_read = hv_end_read(&channel->inbound);
+ else
+ bytes_to_read = 0;
} while (read_state && (bytes_to_read != 0));
} else {
pr_err("no channel callback for relid - %u\n", relid);
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index eaaa3d843b80..23b2ce294c4c 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -246,8 +246,8 @@ void hv_fcopy_onchannelcallback(void *context)
/*
* Send the information to the user-level daemon.
*/
- fcopy_send_data();
schedule_delayed_work(&fcopy_work, 5*HZ);
+ fcopy_send_data();
return;
}
icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index ea852537307e..521c14625b3a 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -127,6 +127,17 @@ kvp_work_func(struct work_struct *dummy)
kvp_respond_to_host(NULL, HV_E_FAIL);
}
+static void poll_channel(struct vmbus_channel *channel)
+{
+ if (channel->target_cpu != smp_processor_id())
+ smp_call_function_single(channel->target_cpu,
+ hv_kvp_onchannelcallback,
+ channel, true);
+ else
+ hv_kvp_onchannelcallback(channel);
+}
+
static int kvp_handle_handshake(struct hv_kvp_msg *msg)
{
int ret = 1;
@@ -155,7 +166,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
kvp_register(dm_reg_value);
kvp_transaction.active = false;
if (kvp_transaction.kvp_context)
- hv_kvp_onchannelcallback(kvp_transaction.kvp_context);
+ poll_channel(kvp_transaction.kvp_context);
}
return ret;
}
@@ -568,7 +579,7 @@ response_done:
vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
VM_PKT_DATA_INBAND, 0);
-
+ poll_channel(channel);
}
/*
@@ -603,7 +614,7 @@ void hv_kvp_onchannelcallback(void *context)
return;
}
- vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+ vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
&requestid);
if (recvlen > 0) {
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index dd761806f0e8..3b9c9ef0deb8 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -319,7 +319,7 @@ static int util_probe(struct hv_device *dev,
(struct hv_util_service *)dev_id->driver_data;
int ret;
- srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
+ srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
if (!srv->recv_buffer)
return -ENOMEM;
if (srv->util_init) {
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index 5ffd81f19d01..0625e50d7a6e 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -239,50 +239,50 @@ static ssize_t adc128_show_alarm(struct device *dev,
return sprintf(buf, "%u\n", !!(alarms & mask));
}
-static SENSOR_DEVICE_ATTR_2(in0_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 0, 0);
+static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO,
+ adc128_show_in, NULL, 0, 0);
static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 0, 1);
static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 0, 2);
-static SENSOR_DEVICE_ATTR_2(in1_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 1, 0);
+static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO,
+ adc128_show_in, NULL, 1, 0);
static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 1, 1);
static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 1, 2);
-static SENSOR_DEVICE_ATTR_2(in2_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 2, 0);
+static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO,
+ adc128_show_in, NULL, 2, 0);
static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 2, 1);
static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 2, 2);
-static SENSOR_DEVICE_ATTR_2(in3_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 3, 0);
+static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO,
+ adc128_show_in, NULL, 3, 0);
static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 3, 1);
static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 3, 2);
-static SENSOR_DEVICE_ATTR_2(in4_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 4, 0);
+static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO,
+ adc128_show_in, NULL, 4, 0);
static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 4, 1);
static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 4, 2);
-static SENSOR_DEVICE_ATTR_2(in5_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 5, 0);
+static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO,
+ adc128_show_in, NULL, 5, 0);
static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 5, 1);
static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 5, 2);
-static SENSOR_DEVICE_ATTR_2(in6_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 6, 0);
+static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO,
+ adc128_show_in, NULL, 6, 0);
static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 6, 1);
static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO,
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 3eb4281689b5..d74241bb278c 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev,
struct adm1021_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
long temp;
- int err;
+ int reg_val, err;
err = kstrtol(buf, 10, &temp);
if (err)
@@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev,
temp /= 1000;
mutex_lock(&data->update_lock);
- data->temp_max[index] = clamp_val(temp, -128, 127);
+ reg_val = clamp_val(temp, -128, 127);
+ data->temp_max[index] = reg_val * 1000;
if (!read_only)
i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
- data->temp_max[index]);
+ reg_val);
mutex_unlock(&data->update_lock);
return count;
@@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev,
struct adm1021_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
long temp;
- int err;
+ int reg_val, err;
err = kstrtol(buf, 10, &temp);
if (err)
@@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev,
temp /= 1000;
mutex_lock(&data->update_lock);
- data->temp_min[index] = clamp_val(temp, -128, 127);
+ reg_val = clamp_val(temp, -128, 127);
+ data->temp_min[index] = reg_val * 1000;
if (!read_only)
i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
- data->temp_min[index]);
+ reg_val);
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 78339e880bd6..2804571b269e 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev,
/* Update the value */
reg = (reg & 0x3F) | (val << 6);
+ /* Update the cache */
+ data->fan_div[attr->index] = reg;
+
/* Write value */
i2c_smbus_write_byte_data(client,
ADM1029_REG_FAN_DIV[attr->index], reg);
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index a8a540ca8c34..51c1a5a165ab 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ val = clamp_val(val, 0, 127000);
mutex_lock(&data->update_lock);
data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]);
adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr),
@@ -394,6 +395,7 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ val = clamp_val(val, 0, 127000);
mutex_lock(&data->update_lock);
data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr],
data->pwm[nr]);
@@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
+ val = clamp_val(val, -55000, 127000);
mutex_lock(&data->update_lock);
data->temp_min[nr] = TEMP_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr),
@@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
+ val = clamp_val(val, -55000, 127000);
mutex_lock(&data->update_lock);
data->temp_max[nr] = TEMP_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr),
@@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
+ val = clamp_val(val, -55000, 127000);
mutex_lock(&data->update_lock);
data->temp_crit[nr] = TEMP_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr),
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 0f4dea5ccf17..9ee3913850d6 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, 0, 255);
+ temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, 0, 255);
+ temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->temp_max[attr->index] = temp;
@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, 0, 255);
+ temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index eea817296513..9f2be3dd28f3 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -704,7 +704,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
get_temp_alarm, NULL, IDX_TEMP1_MAX);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
get_temp_alarm, NULL, IDX_TEMP1_CRIT);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
get_temp, NULL, IDX_TEMP2_INPUT);
static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp,
set_temp, IDX_TEMP2_MIN);
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index afd31042b452..d14ab3c45daa 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
- return sprintf(buf, "da9052-hwmon\n");
+ return sprintf(buf, "da9052\n");
}
static ssize_t show_label(struct device *dev,
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 73b3865f1207..35eb7738d711 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
- return sprintf(buf, "da9055-hwmon\n");
+ return sprintf(buf, "da9055\n");
}
static ssize_t show_label(struct device *dev,
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index fd892dd48e4c..78002de46cb6 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -250,9 +250,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
if (result < 0)
return result;
- val = DIV_ROUND_CLOSEST(val, 1000);
- if ((val < -63) || (val > 127))
- return -EINVAL;
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
mutex_lock(&data->update_lock);
data->temp_min[nr] = val;
@@ -274,9 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
if (result < 0)
return result;
- val = DIV_ROUND_CLOSEST(val, 1000);
- if ((val < -63) || (val > 127))
- return -EINVAL;
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
mutex_lock(&data->update_lock);
data->temp_max[nr] = val;
@@ -390,15 +386,14 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
{
struct emc2103_data *data = emc2103_update_device(dev);
struct i2c_client *client = to_i2c_client(dev);
- long rpm_target;
+ unsigned long rpm_target;
- int result = kstrtol(buf, 10, &rpm_target);
+ int result = kstrtoul(buf, 10, &rpm_target);
if (result < 0)
return result;
/* Datasheet states 16384 as maximum RPM target (table 3.2) */
- if ((rpm_target < 0) || (rpm_target > 16384))
- return -EINVAL;
+ rpm_target = clamp_val(rpm_target, 0, 16384);
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index bdfbe9114889..ae66f42c4d6d 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -512,7 +512,7 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n",
- pdev->name);
+ pdev_id->name);
return 0;
err_after_sysfs:
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index efee4c59239f..34b9a601ad07 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
*/
static inline s8 TEMP_TO_REG(int val)
{
- return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
+ return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
}
static inline int TEMP_FROM_REG(s8 val)
@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
err = kstrtoul(buf, 10, &val);
if (err)
return err;
+ if (val > 255)
+ return -EINVAL;
data->vrm = val;
return count;
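The TEMP_TO_REG() change earlier in this file's diff matters because the old code clamped after scaling: SCALE(val, 1, 1000) had already reduced val to register units, so clamping that result against the millidegree bounds -128000..127000 could never fire, and out-of-range inputs wrapped during the s8 conversion. A worked example with an assumed input value:

/* val = 200000 (200 degrees C, out of range):
 * old: SCALE(200000, 1, 1000) = 200; clamp_val(200, -128000, 127000) = 200;
 *      (s8)200 wraps to -56, so a bogus limit reaches the chip.
 * new: clamp_val(200000, -128000, 127000) = 127000; SCALE(...) = 127, sane.
 */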
diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
index 09de4fd12d57..4d75d4759709 100644
--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
+++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
@@ -22,7 +22,6 @@
*
*/
#include <linux/clk.h>
-#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/interrupt.h>
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index f7f9865b8b89..f6d313e528de 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -40,6 +40,7 @@ config I2C_MUX_PCA9541
config I2C_MUX_PCA954x
tristate "Philips PCA954x I2C Mux/switches"
+ depends on GPIOLIB
help
If you say yes here you get support for the Philips PCA954x
I2C mux/switch devices.
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 8fb46aab2d87..a04c49f2a011 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -416,6 +416,7 @@ config BLK_DEV_CY82C693
config BLK_DEV_CS5520
tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
+ depends on X86_32 || COMPILE_TEST
select BLK_DEV_IDEDMA_PCI
help
Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -426,6 +427,7 @@ config BLK_DEV_CS5520
config BLK_DEV_CS5530
tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
+ depends on X86_32 || COMPILE_TEST
select BLK_DEV_IDEDMA_PCI
help
Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
@@ -435,7 +437,7 @@ config BLK_DEV_CS5530
config BLK_DEV_CS5535
tristate "AMD CS5535 chipset support"
- depends on X86 && !X86_64
+ depends on X86_32
select BLK_DEV_IDEDMA_PCI
help
Include support for UDMA on the NSC/AMD CS5535 companion chipset.
@@ -486,6 +488,7 @@ config BLK_DEV_JMICRON
config BLK_DEV_SC1200
tristate "National SCx200 chipset support"
+ depends on X86_32 || COMPILE_TEST
select BLK_DEV_IDEDMA_PCI
help
This driver adds support for the on-board IDE controller on the
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 2a744a91370e..a3d3b1733c49 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif)
if (irq_handler == NULL)
irq_handler = ide_intr;
- if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
- goto out_up;
+ if (!host->get_lock)
+ if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
+ goto out_up;
#if !defined(__mc68000__)
printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
@@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif)
ide_proc_unregister_port(hwif);
- free_irq(hwif->irq, hwif);
+ if (!hwif->host->get_lock)
+ free_irq(hwif->irq, hwif);
device_unregister(hwif->portdev);
device_unregister(&hwif->gendev);
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index a7e68c81f89d..a077cc86421b 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -68,13 +68,13 @@
/* Default values */
#define BMA180_DEF_PMODE 0
#define BMA180_DEF_BW 20
-#define BMA180_DEF_SCALE 250
+#define BMA180_DEF_SCALE 2452
/* Available values for sysfs */
#define BMA180_FLP_FREQ_AVAILABLE \
"10 20 40 75 150 300"
#define BMA180_SCALE_AVAILABLE \
- "0.000130 0.000190 0.000250 0.000380 0.000500 0.000990 0.001980"
+ "0.001275 0.001863 0.002452 0.003727 0.004903 0.009709 0.019417"
struct bma180_data {
struct i2c_client *client;
@@ -94,7 +94,7 @@ enum bma180_axis {
};
static int bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
-static int scale_table[] = { 130, 190, 250, 380, 500, 990, 1980 };
+static int scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
static int bma180_get_acc_reg(struct bma180_data *data, enum bma180_axis axis)
{
@@ -376,6 +376,8 @@ static int bma180_write_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->mutex);
return ret;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ if (val2)
+ return -EINVAL;
mutex_lock(&data->mutex);
ret = bma180_set_bw(data, val);
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 69abf9163df7..54e464e4bb72 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -110,7 +110,6 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
struct accel_3d_state *accel_state = iio_priv(indio_dev);
int report_id = -1;
u32 address;
- int ret;
int ret_type;
s32 poll_value;
@@ -151,14 +150,12 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = hid_sensor_read_samp_freq_value(
+ ret_type = hid_sensor_read_samp_freq_value(
&accel_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_read_raw_hyst_value(
+ ret_type = hid_sensor_read_raw_hyst_value(
&accel_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret_type = -EINVAL;
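
The same conversion recurs below for the gyro, ALS, proximity, magnetometer, and pressure HID sensors: the read helpers return either a negative errno or an IIO_VAL_* format code, so the old code both discarded errors and hardcoded IIO_VAL_INT_PLUS_MICRO. A userspace sketch of the convention (the constant values mirror the kernel's headers but are assumptions here):

#include <stdio.h>

#define IIO_VAL_INT             1       /* assumed values */
#define IIO_VAL_INT_PLUS_MICRO  2

/* plays the role of hid_sensor_read_samp_freq_value() */
static int read_samp_freq_value(int *val, int *val2)
{
        *val = 10;
        *val2 = 500000;
        return IIO_VAL_INT_PLUS_MICRO;  /* or a negative errno */
}

int main(void)
{
        int val, val2;
        int ret_type = read_samp_freq_value(&val, &val2);

        if (ret_type < 0)
                return 1;       /* errors now propagate; before, lost */
        printf("%d.%06d (format %d)\n", val, val2, ret_type);
        return 0;
}
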
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 17aeea170566..2a5fa9a436e5 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -111,8 +111,14 @@ static const int mma8452_samp_freq[8][2] = {
{6, 250000}, {1, 560000}
};
+/*
+ * Hardware has a full scale of -2G, -4G, -8G corresponding to a raw value
+ * of -2048. The userspace interface uses m/s^2 and we declare micro units,
+ * so the scale factor is given by:
+ * g * N * 1000000 / 2048 for N = 2, 4, 8 and g = 9.80665
+ */
static const int mma8452_scales[3][2] = {
- {0, 977}, {0, 1953}, {0, 3906}
+ {0, 9577}, {0, 19154}, {0, 38307}
};
static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
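
The corrected mma8452_scales[] entries follow directly from the formula in the new comment: for the 2 g range, 2 * 9.80665 * 1000000 / 2048 is about 9576.8, which rounds to the 9577 micro-(m/s^2) per LSB entry. A throwaway sketch that regenerates the table:

#include <stdio.h>

int main(void)
{
        const double g = 9.80665;
        const int ranges[] = { 2, 4, 8 };

        for (int i = 0; i < 3; i++)
                /* micro m/s^2 per LSB of a 12-bit signed sample */
                printf("{0, %.0f},\n", g * ranges[i] * 1000000.0 / 2048.0);
        return 0;       /* prints 9577, 19154, 38307 */
}
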
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index a4db3026bec6..d5dc4c6ce86c 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -374,7 +374,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
return -EAGAIN;
}
}
- map_val = chan->channel + TOTAL_CHANNELS;
+ map_val = adc_dev->channel_step[chan->scan_index];
/*
* We check the complete FIFO. We programmed just one entry but in case
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 40f4e4935d0d..fa034a3dad78 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -110,7 +110,6 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
int report_id = -1;
u32 address;
- int ret;
int ret_type;
s32 poll_value;
@@ -151,14 +150,12 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = hid_sensor_read_samp_freq_value(
+ ret_type = hid_sensor_read_samp_freq_value(
&gyro_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_read_raw_hyst_value(
+ ret_type = hid_sensor_read_raw_hyst_value(
&gyro_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret_type = -EINVAL;
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 36b1ae92e239..9f1a14009901 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -966,7 +966,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
/* Now we have the two masks, work from least sig and build up sizes */
for_each_set_bit(out_ind,
- indio_dev->active_scan_mask,
+ buffer->scan_mask,
indio_dev->masklength) {
in_ind = find_next_bit(indio_dev->active_scan_mask,
indio_dev->masklength,
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 258a973a1fb8..bfbf4d419f41 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -345,6 +345,9 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
&indio_dev->event_interface->dev_attr_list);
kfree(postfix);
+ if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
+ continue;
+
if (ret)
return ret;
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index f34c94380b41..96e71e103ea7 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -79,7 +79,6 @@ static int als_read_raw(struct iio_dev *indio_dev,
struct als_state *als_state = iio_priv(indio_dev);
int report_id = -1;
u32 address;
- int ret;
int ret_type;
s32 poll_value;
@@ -129,14 +128,12 @@ static int als_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = hid_sensor_read_samp_freq_value(
+ ret_type = hid_sensor_read_samp_freq_value(
&als_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_read_raw_hyst_value(
+ ret_type = hid_sensor_read_raw_hyst_value(
&als_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret_type = -EINVAL;
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index d203ef4d892f..412bae86d6ae 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -74,7 +74,6 @@ static int prox_read_raw(struct iio_dev *indio_dev,
struct prox_state *prox_state = iio_priv(indio_dev);
int report_id = -1;
u32 address;
- int ret;
int ret_type;
s32 poll_value;
@@ -125,14 +124,12 @@ static int prox_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = hid_sensor_read_samp_freq_value(
+ ret_type = hid_sensor_read_samp_freq_value(
&prox_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_read_raw_hyst_value(
+ ret_type = hid_sensor_read_raw_hyst_value(
&prox_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret_type = -EINVAL;
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index fe063a0a21cd..752569985d1d 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -52,6 +52,7 @@
struct tcs3472_data {
struct i2c_client *client;
+ struct mutex lock;
u8 enable;
u8 control;
u8 atime;
@@ -116,10 +117,17 @@ static int tcs3472_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ mutex_lock(&data->lock);
ret = tcs3472_req_data(data);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
return ret;
+ }
ret = i2c_smbus_read_word_data(data->client, chan->address);
+ mutex_unlock(&data->lock);
if (ret < 0)
return ret;
*val = ret;
@@ -255,6 +263,7 @@ static int tcs3472_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &tcs3472_info;
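
The new mutex in tcs3472 makes the two-step transaction (request a sample, then read the channel word) atomic with respect to concurrent sysfs readers; note that every early return inside the locked region must drop the lock. An equivalent shape with a single unlock label, sketched in kernel style rather than as a drop-in for the driver:

        mutex_lock(&data->lock);
        ret = tcs3472_req_data(data);   /* step 1: trigger a measurement */
        if (ret < 0)
                goto out_unlock;
        /* step 2: must pair with step 1 under the same lock */
        ret = i2c_smbus_read_word_data(data->client, chan->address);
out_unlock:
        mutex_unlock(&data->lock);
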
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 41cf29e2a371..b2b0937d5133 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -110,7 +110,6 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
struct magn_3d_state *magn_state = iio_priv(indio_dev);
int report_id = -1;
u32 address;
- int ret;
int ret_type;
s32 poll_value;
@@ -153,14 +152,12 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = hid_sensor_read_samp_freq_value(
+ ret_type = hid_sensor_read_samp_freq_value(
&magn_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_read_raw_hyst_value(
+ ret_type = hid_sensor_read_raw_hyst_value(
&magn_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret_type = -EINVAL;
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 1cd190c73788..2c0d2a4fed8c 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -78,7 +78,6 @@ static int press_read_raw(struct iio_dev *indio_dev,
struct press_state *press_state = iio_priv(indio_dev);
int report_id = -1;
u32 address;
- int ret;
int ret_type;
s32 poll_value;
@@ -128,14 +127,12 @@ static int press_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = hid_sensor_read_samp_freq_value(
+ ret_type = hid_sensor_read_samp_freq_value(
&press_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_read_raw_hyst_value(
+ ret_type = hid_sensor_read_raw_hyst_value(
&press_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret_type = -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 5e153f6d4b48..768a0fb67dd6 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -432,8 +432,17 @@ static void arp_failure_discard(void *handle, struct sk_buff *skb)
*/
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
+ struct c4iw_ep *ep = handle;
+
printk(KERN_ERR MOD "ARP failure duing connect\n");
kfree_skb(skb);
+ connect_reply_upcall(ep, -EHOSTUNREACH);
+ state_set(&ep->com, DEAD);
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ c4iw_put_ep(&ep->com);
}
/*
@@ -658,7 +667,7 @@ static int send_connect(struct c4iw_ep *ep)
opt2 |= T5_OPT_2_VALID;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
}
- t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
+ t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
if (ep->com.remote_addr.ss_family == AF_INET) {
@@ -2180,7 +2189,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
BUG_ON(skb_cloned(skb));
skb_trim(skb, sizeof(struct cpl_tid_release));
- skb_get(skb);
release_tid(&dev->rdev, hwtid, skb);
return;
}
@@ -3917,7 +3925,7 @@ int __init c4iw_cm_init(void)
return 0;
}
-void __exit c4iw_cm_term(void)
+void c4iw_cm_term(void)
{
WARN_ON(!list_empty(&timeout_list));
flush_workqueue(workq);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index dd93aadc996e..7db82b24302b 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -696,6 +696,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
pr_err(MOD "error allocating status page\n");
goto err4;
}
+ rdev->status_page->db_off = 0;
return 0;
err4:
c4iw_rqtpool_destroy(rdev);
@@ -729,7 +730,6 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
if (ctx->dev->rdev.oc_mw_kva)
iounmap(ctx->dev->rdev.oc_mw_kva);
ib_dealloc_device(&ctx->dev->ibdev);
- iwpm_exit(RDMA_NL_C4IW);
ctx->dev = NULL;
}
@@ -826,12 +826,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
setup_debugfs(devp);
}
- ret = iwpm_init(RDMA_NL_C4IW);
- if (ret) {
- pr_err("port mapper initialization failed with %d\n", ret);
- ib_dealloc_device(&devp->ibdev);
- return ERR_PTR(ret);
- }
return devp;
}
@@ -1332,6 +1326,15 @@ static int __init c4iw_init_module(void)
pr_err("%s[%u]: Failed to add netlink callback\n"
, __func__, __LINE__);
+ err = iwpm_init(RDMA_NL_C4IW);
+ if (err) {
+ pr_err("port mapper initialization failed with %d\n", err);
+ ibnl_remove_client(RDMA_NL_C4IW);
+ c4iw_cm_term();
+ debugfs_remove_recursive(c4iw_debugfs_root);
+ return err;
+ }
+
cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
return 0;
@@ -1349,6 +1352,7 @@ static void __exit c4iw_exit_module(void)
}
mutex_unlock(&dev_mutex);
cxgb4_unregister_uld(CXGB4_ULD_RDMA);
+ iwpm_exit(RDMA_NL_C4IW);
ibnl_remove_client(RDMA_NL_C4IW);
c4iw_cm_term();
debugfs_remove_recursive(c4iw_debugfs_root);
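
Relocating iwpm_init() into c4iw_init_module() pairs it with the iwpm_exit() now in c4iw_exit_module(), keeping setup and teardown symmetric; on a mid-init failure everything done so far is unwound in reverse order. A generic sketch of the pattern (step_a, step_b and undo_step_a are hypothetical placeholders):

static int __init example_init(void)
{
        int err;

        err = step_a();         /* e.g. c4iw_cm_init() */
        if (err)
                return err;
        err = step_b();         /* e.g. iwpm_init() */
        if (err)
                goto undo_a;    /* unwind in reverse order */
        return 0;

undo_a:
        undo_step_a();
        return err;
}
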
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 125bc5d1e175..361fff7a0742 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -908,7 +908,7 @@ int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
-void __exit c4iw_cm_term(void);
+void c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d13ddf1c0033..bbbcf389272c 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -675,7 +675,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
int err;
uuari = &dev->mdev.priv.uuari;
- if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN)
+ if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1c4c0db05550..29ca0bb4f561 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
}
static int input_get_disposition(struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
+ unsigned int type, unsigned int code, int *pval)
{
int disposition = INPUT_IGNORE_EVENT;
+ int value = *pval;
switch (type) {
@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
break;
}
+ *pval = value;
return disposition;
}
@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
{
int disposition;
- disposition = input_get_disposition(dev, type, code, value);
+ disposition = input_get_disposition(dev, type, code, &value);
if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
dev->event(dev, type, code, value);
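
Passing the event value by pointer lets input_get_disposition() hand any normalization it performs back to input_handle_event(), which previously forwarded the caller's original value to dev->event(). A userspace sketch of the out-parameter pattern (the normalization rule is invented for illustration):

#include <stdio.h>

static int get_disposition(int type, int *pval)
{
        int value = *pval;

        if (value > 1)
                value = 1;      /* e.g. normalize a key event to 0/1 */
        *pval = value;          /* write the adjusted value back */
        return type;            /* disposition (simplified) */
}

int main(void)
{
        int value = 5;

        get_disposition(1, &value);
        printf("forwarded value: %d\n", value);  /* 1, not 5 */
        return 0;
}
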
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index 758b48731415..de7be4f03d91 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int keyscan_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev)
mutex_unlock(&input->mutex);
return retval;
}
+#endif
static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
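
SIMPLE_DEV_PM_OPS() only references its callbacks when CONFIG_PM_SLEEP is enabled, so without the guard the suspend/resume functions are defined but unused on !PM_SLEEP builds and trip -Wunused-function. The shape of the fix, sketched in kernel style:

#ifdef CONFIG_PM_SLEEP
static int keyscan_suspend(struct device *dev) { /* ... */ return 0; }
static int keyscan_resume(struct device *dev)  { /* ... */ return 0; }
#endif

/* expands to empty pm_ops when CONFIG_PM_SLEEP is disabled */
static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
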
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index e4104f9b2e6d..fed5102e1802 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
module_platform_driver(sirfsoc_pwrc_driver);
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Binghua Duan <[email protected]>, Xianglong Du <[email protected]>");
MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
MODULE_ALIAS("platform:sirfsoc-pwrc");
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ec772d962f06..ef9e0b8a9aa7 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1232, 5710, 1156, 4696
},
{
- (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+ (const char * const []){"LEN0034", "LEN0036", "LEN2002",
+ "LEN2004", NULL},
1024, 5112, 2024, 4832
},
{
@@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0049",
"LEN2000",
"LEN2001", /* Edge E431 */
- "LEN2002",
+ "LEN2002", /* Edge E531 */
"LEN2003",
"LEN2004", /* L440 */
"LEN2005",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 381b20d4c561..136b7b204f56 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -402,6 +402,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
},
},
{
+ /* Acer Aspire 5710 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+ },
+ },
+ {
/* Gericom Bellagio */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 977d05cd9e2e..e73cf2c71f35 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
* a=(pi*r^2)/C.
*/
int a = data[5];
- int x_res = input_abs_get_res(input, ABS_X);
- int y_res = input_abs_get_res(input, ABS_Y);
- width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
+ int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
+ int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
+ width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
height = width * y_res / x_res;
}
@@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
input_abs_set_res(input_dev, ABS_X, features->x_resolution);
input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
} else {
- if (features->touch_max <= 2) {
+ if (features->touch_max == 1) {
input_set_abs_params(input_dev, ABS_X, 0,
features->x_max, features->x_fuzz, 0);
input_set_abs_params(input_dev, ABS_Y, 0,
@@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case MTTPC:
case MTTPC_B:
case TABLETPC2FG:
- if (features->device_type == BTN_TOOL_FINGER) {
- unsigned int flags = INPUT_MT_DIRECT;
-
- if (wacom_wac->features.type == TABLETPC2FG)
- flags = 0;
-
- input_mt_init_slots(input_dev, features->touch_max, flags);
- }
+ if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
+ input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
/* fall through */
case TABLETPC:
@@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_RIGHT, input_dev->keybit);
if (features->touch_max) {
- /* touch interface */
- unsigned int flags = INPUT_MT_POINTER;
-
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MAJOR,
@@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MINOR,
0, features->y_max, 0, 0);
- } else {
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- flags = 0;
}
- input_mt_init_slots(input_dev, features->touch_max, flags);
+ input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
} else {
/* buttons/keys only interface */
__clear_bit(ABS_X, input_dev->absbit);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 4e793a17361f..2ce649520fe0 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev,
*/
err = of_property_read_u32(node, "ti,coordinate-readouts",
&ts_dev->coordinate_readouts);
- if (err < 0)
+ if (err < 0) {
+ dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
err = of_property_read_u32(node, "ti,coordiante-readouts",
&ts_dev->coordinate_readouts);
+ }
+
if (err < 0)
return err;
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index b99dd88e31b9..bb446d742a2d 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -170,10 +170,10 @@ int pamu_disable_liodn(int liodn)
static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
{
/* Bug if not a power of 2 */
- BUG_ON(!is_power_of_2(addrspace_size));
+ BUG_ON((addrspace_size & (addrspace_size - 1)));
/* window size is 2^(WSE+1) bytes */
- return __ffs(addrspace_size) - 1;
+ return fls64(addrspace_size) - 2;
}
/* Derive the PAACE window count encoding for the subwindow count */
@@ -351,7 +351,7 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
struct paace *ppaace;
unsigned long fspi;
- if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) {
+ if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
pr_debug("window size too small or not a power of two %llx\n", win_size);
return -EINVAL;
}
@@ -464,7 +464,7 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
return -ENOENT;
}
- if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) {
+ if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
pr_debug("subwindow size out of range, or not a power of 2\n");
return -EINVAL;
}
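
The PAMU helpers switch away from is_power_of_2() and __ffs(), which operate on unsigned long and silently truncate a 64-bit phys_addr_t on 32-bit kernels, to an open-coded power-of-two test and fls64(). Since a window of size 2^(WSE+1) needs WSE = fls64(size) - 2, a 2^36-byte window encodes as WSE = 35. A userspace sketch (my_fls64 stands in for the kernel's fls64):

#include <stdio.h>
#include <stdint.h>

/* 1-based index of the highest set bit, like the kernel's fls64() */
static unsigned int my_fls64(uint64_t x)
{
        unsigned int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

int main(void)
{
        uint64_t size = 1ULL << 36;     /* 64 GiB window on a 32-bit CPU */

        if (size & (size - 1))          /* power-of-2 test, no truncation */
                return 1;
        /* window size is 2^(WSE+1), so WSE = fls64(size) - 2 */
        printf("WSE = %u\n", my_fls64(size) - 2);       /* prints 35 */
        return 0;
}
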
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 93072ba44b1d..af47648301a9 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -301,7 +301,7 @@ static int check_size(u64 size, dma_addr_t iova)
* Size must be a power of two and at least be equal
* to PAMU page size.
*/
- if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) {
+ if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
pr_debug("%s: size too small or not a power of two\n", __func__);
return -EINVAL;
}
@@ -335,11 +335,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
return domain;
}
-static inline struct device_domain_info *find_domain(struct device *dev)
-{
- return dev->archdata.iommu_domain;
-}
-
static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
unsigned long flags;
@@ -380,7 +375,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
* Check here if the device is already attached to domain or not.
* If the device is already attached to a domain detach it.
*/
- old_domain_info = find_domain(dev);
+ old_domain_info = dev->archdata.iommu_domain;
if (old_domain_info && old_domain_info->domain != dma_domain) {
spin_unlock_irqrestore(&device_domain_lock, flags);
detach_device(dev, old_domain_info->domain);
@@ -399,7 +394,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
* the info for the first LIODN as all
* LIODNs share the same domain
*/
- if (!old_domain_info)
+ if (!dev->archdata.iommu_domain)
dev->archdata.iommu_domain = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -1042,12 +1037,15 @@ root_bus:
group = get_shared_pci_device_group(pdev);
}
+ if (!group)
+ group = ERR_PTR(-ENODEV);
+
return group;
}
static int fsl_pamu_add_device(struct device *dev)
{
- struct iommu_group *group = NULL;
+ struct iommu_group *group = ERR_PTR(-ENODEV);
struct pci_dev *pdev;
const u32 *prop;
int ret, len;
@@ -1070,7 +1068,7 @@ static int fsl_pamu_add_device(struct device *dev)
group = get_device_iommu_group(dev);
}
- if (!group || IS_ERR(group))
+ if (IS_ERR(group))
return PTR_ERR(group);
ret = iommu_group_add_device(group, dev);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7e11c9d6ae8c..7c131cf7cc13 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -42,6 +42,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
+#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
@@ -954,7 +955,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
}
for_each_possible_cpu(cpu) {
- unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+ u32 mpidr = cpu_logical_map(cpu);
+ u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ unsigned long offset = percpu_offset * core_id;
*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
}
@@ -1071,8 +1074,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
gic_cnt++;
return 0;
}
+IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
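
cpu_logical_map() returns a full MPIDR, and on multi-cluster systems the cluster ID lives in affinity level 1; multiplying percpu_offset by the raw value would index a nonexistent banked GIC frame. Masking down to affinity level 0 recovers the per-core index. A userspace sketch (the macro mirrors the kernel's definition):

#include <stdio.h>
#include <stdint.h>

#define MPIDR_LEVEL_BITS        8
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
        (((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & 0xff)

int main(void)
{
        uint32_t mpidr = 0x0100;        /* cluster 1, core 0 */
        uint32_t core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        /* raw mpidr * percpu_offset would address "CPU 256";
         * the masked core_id is the correct banked index, 0 */
        printf("core_id = %u\n", core_id);
        return 0;
}
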
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index c44950d3eb7b..b7ae0a0dd5b6 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2400,6 +2400,7 @@ allocerr:
error:
freeurbs(cs);
usb_set_intfdata(interface, NULL);
+ usb_put_dev(udev);
gigaset_freecs(cs);
return rc;
}
diff --git a/drivers/isdn/hisax/l3ni1.c b/drivers/isdn/hisax/l3ni1.c
index 0df6691d045c..8dc791bfaa6f 100644
--- a/drivers/isdn/hisax/l3ni1.c
+++ b/drivers/isdn/hisax/l3ni1.c
@@ -2059,13 +2059,17 @@ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic)
memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */
l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */
- if (ic->parm.ni1_io.timeout > 0)
- if (!(pc = ni1_new_l3_process(st, -1)))
- { free_invoke_id(st, id);
+ if (ic->parm.ni1_io.timeout > 0) {
+ pc = ni1_new_l3_process(st, -1);
+ if (!pc) {
+ free_invoke_id(st, id);
return (-2);
}
- pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* remember id */
- pc->prot.ni1.proc = ic->parm.ni1_io.proc; /* and procedure */
+ /* remember id */
+ pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id;
+ /* and procedure */
+ pc->prot.ni1.proc = ic->parm.ni1_io.proc;
+ }
if (!(skb = l3_alloc_skb(l)))
{ free_invoke_id(st, id);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 61ac63237446..62f0688d45a5 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -442,7 +442,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
{
struct sock_fprog uprog;
struct sock_filter *code = NULL;
- int len, err;
+ int len;
if (copy_from_user(&uprog, arg, sizeof(uprog)))
return -EFAULT;
@@ -458,12 +458,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
if (IS_ERR(code))
return PTR_ERR(code);
- err = sk_chk_filter(code, uprog.len);
- if (err) {
- kfree(code);
- return err;
- }
-
*p = code;
return uprog.len;
}
@@ -644,9 +638,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
fprog.len = len;
fprog.filter = code;
- if (is->pass_filter)
+ if (is->pass_filter) {
sk_unattached_filter_destroy(is->pass_filter);
- err = sk_unattached_filter_create(&is->pass_filter, &fprog);
+ is->pass_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&is->pass_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
return err;
@@ -663,9 +663,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
fprog.len = len;
fprog.filter = code;
- if (is->active_filter)
+ if (is->active_filter) {
sk_unattached_filter_destroy(is->active_filter);
- err = sk_unattached_filter_create(&is->active_filter, &fprog);
+ is->active_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&is->active_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
return err;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 4e84095833db..d724459860d9 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1541,7 +1541,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
BUG_ON(block_size < 1 << SECTOR_SHIFT ||
(block_size & (block_size - 1)));
- c = kmalloc(sizeof(*c), GFP_KERNEL);
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c) {
r = -ENOMEM;
goto bad_client;
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 4ead4ba60656..d2899e7eb3aa 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -425,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
disk_super = dm_block_data(sblock);
+ /* Verify the data block size hasn't changed */
+ if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
+ DMERR("changing the data block size (from %u to %llu) is not supported",
+ le32_to_cpu(disk_super->data_block_size),
+ (unsigned long long)cmd->data_block_size);
+ r = -EINVAL;
+ goto bad;
+ }
+
r = __check_incompat_features(disk_super, cmd);
if (r < 0)
goto bad;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5f054c44b485..2c63326638b6 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -231,7 +231,7 @@ struct cache {
/*
* cache_size entries, dirty if set
*/
- dm_cblock_t nr_dirty;
+ atomic_t nr_dirty;
unsigned long *dirty_bitset;
/*
@@ -492,7 +492,7 @@ static bool is_dirty(struct cache *cache, dm_cblock_t b)
static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
- cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
+ atomic_inc(&cache->nr_dirty);
policy_set_dirty(cache->policy, oblock);
}
}
@@ -501,8 +501,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
{
if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
policy_clear_dirty(cache->policy, oblock);
- cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
- if (!from_cblock(cache->nr_dirty))
+ if (atomic_dec_return(&cache->nr_dirty) == 0)
dm_table_event(cache->ti->table);
}
}
@@ -2269,7 +2268,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
atomic_set(&cache->quiescing_ack, 0);
r = -ENOMEM;
- cache->nr_dirty = 0;
+ atomic_set(&cache->nr_dirty, 0);
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
if (!cache->dirty_bitset) {
*error = "could not allocate dirty bitset";
@@ -2808,7 +2807,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
residency = policy_residency(cache->policy);
- DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ",
+ DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
(unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
(unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
(unsigned long long)nr_blocks_metadata,
@@ -2821,7 +2820,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
(unsigned) atomic_read(&cache->stats.write_miss),
(unsigned) atomic_read(&cache->stats.demotion),
(unsigned) atomic_read(&cache->stats.promotion),
- (unsigned long long) from_cblock(cache->nr_dirty));
+ (unsigned long) atomic_read(&cache->nr_dirty));
if (writethrough_mode(&cache->features))
DMEMIT("1 writethrough ");
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 53b213226c01..4cba2d808afb 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2003 Christophe Saout <[email protected]>
+ * Copyright (C) 2003 Jana Saout <[email protected]>
* Copyright (C) 2004 Clemens Fruhwirth <[email protected]>
* Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
* Copyright (C) 2013 Milan Broz <[email protected]>
@@ -1996,6 +1996,6 @@ static void __exit dm_crypt_exit(void)
module_init(dm_crypt_init);
module_exit(dm_crypt_exit);
-MODULE_AUTHOR("Christophe Saout <[email protected]>");
+MODULE_AUTHOR("Jana Saout <[email protected]>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3842ac738f98..db404a0f7e2c 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -10,6 +10,7 @@
#include <linux/device-mapper.h>
#include <linux/bio.h>
+#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -32,7 +33,7 @@ struct dm_io_client {
struct io {
unsigned long error_bits;
atomic_t count;
- struct task_struct *sleeper;
+ struct completion *wait;
struct dm_io_client *client;
io_notify_fn callback;
void *context;
@@ -121,8 +122,8 @@ static void dec_count(struct io *io, unsigned int region, int error)
invalidate_kernel_vmap_range(io->vma_invalidate_address,
io->vma_invalidate_size);
- if (io->sleeper)
- wake_up_process(io->sleeper);
+ if (io->wait)
+ complete(io->wait);
else {
unsigned long r = io->error_bits;
@@ -387,6 +388,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
*/
volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
+ DECLARE_COMPLETION_ONSTACK(wait);
if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
WARN_ON(1);
@@ -395,7 +397,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
io->error_bits = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
- io->sleeper = current;
+ io->wait = &wait;
io->client = client;
io->vma_invalidate_address = dp->vma_invalidate_address;
@@ -403,15 +405,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
dispatch_io(rw, num_regions, where, dp, io, 1);
- while (1) {
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (!atomic_read(&io->count))
- break;
-
- io_schedule();
- }
- set_current_state(TASK_RUNNING);
+ wait_for_completion_io(&wait);
if (error_bits)
*error_bits = io->error_bits;
@@ -434,7 +428,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io = mempool_alloc(client->pool, GFP_NOIO);
io->error_bits = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
- io->sleeper = NULL;
+ io->wait = NULL;
io->client = client;
io->callback = fn;
io->context = context;
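
dm-io's synchronous path used to park the submitting task by hand with set_current_state()/io_schedule() and have dec_count() wake it; the completion API expresses the same handshake without the hand-rolled loop, and wait_for_completion_io() additionally accounts the sleep as I/O wait. The shape of the handshake, sketched in kernel style with the names from the hunks above:

DECLARE_COMPLETION_ONSTACK(wait);

io->wait = &wait;                       /* submitter publishes the completion */
dispatch_io(rw, num_regions, where, dp, io, 1);
wait_for_completion_io(&wait);          /* sleeps; counted as io-wait */

/* ...and in dec_count(), when the last region finishes: */
if (io->wait)
        complete(io->wait);
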
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3f6fd9d33ba3..f4167b013d99 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1611,8 +1611,9 @@ static int multipath_busy(struct dm_target *ti)
spin_lock_irqsave(&m->lock, flags);
- /* pg_init in progress, requeue until done */
- if (!pg_ready(m)) {
+ /* pg_init in progress or no paths available */
+ if (m->pg_init_in_progress ||
+ (!m->nr_valid_paths && m->queue_if_no_path)) {
busy = 1;
goto out;
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index b086a945edcb..e9d33ad59df5 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
disk_super = dm_block_data(sblock);
+ /* Verify the data block size hasn't changed */
+ if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
+ DMERR("changing the data block size (from %u to %llu) is not supported",
+ le32_to_cpu(disk_super->data_block_size),
+ (unsigned long long)pmd->data_block_size);
+ r = -EINVAL;
+ goto bad_unlock_sblock;
+ }
+
r = __check_incompat_features(disk_super, pmd);
if (r < 0)
goto bad_unlock_sblock;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index c99003e0d47a..b9a64bbce304 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2003 Christophe Saout <[email protected]>
+ * Copyright (C) 2003 Jana Saout <[email protected]>
*
* This file is released under the GPL.
*/
@@ -79,6 +79,6 @@ static void __exit dm_zero_exit(void)
module_init(dm_zero_init)
module_exit(dm_zero_exit)
-MODULE_AUTHOR("Christophe Saout <[email protected]>");
+MODULE_AUTHOR("Jana Saout <[email protected]>");
MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 437d99045ef2..32b958dbc499 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -54,6 +54,8 @@ static void do_deferred_remove(struct work_struct *w);
static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
+static struct workqueue_struct *deferred_remove_workqueue;
+
/*
* For bio-based dm.
* One of these is allocated per bio.
@@ -276,16 +278,24 @@ static int __init local_init(void)
if (r)
goto out_free_rq_tio_cache;
+ deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
+ if (!deferred_remove_workqueue) {
+ r = -ENOMEM;
+ goto out_uevent_exit;
+ }
+
_major = major;
r = register_blkdev(_major, _name);
if (r < 0)
- goto out_uevent_exit;
+ goto out_free_workqueue;
if (!_major)
_major = r;
return 0;
+out_free_workqueue:
+ destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
dm_uevent_exit();
out_free_rq_tio_cache:
@@ -299,6 +309,7 @@ out_free_io_cache:
static void local_exit(void)
{
flush_scheduled_work();
+ destroy_workqueue(deferred_remove_workqueue);
kmem_cache_destroy(_rq_tio_cache);
kmem_cache_destroy(_io_cache);
@@ -407,7 +418,7 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode)
if (atomic_dec_and_test(&md->open_count) &&
(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
- schedule_work(&deferred_remove_work);
+ queue_work(deferred_remove_workqueue, &deferred_remove_work);
dm_put(md);
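
Deferred removal previously rode the shared system workqueue, where it could be stalled behind unrelated work and where local_exit()'s flush owns nothing concrete; a private single-threaded unbound queue isolates the job and gives teardown a queue it can destroy. A condensed sketch of the lifecycle from the hunks above (kernel style):

static struct workqueue_struct *deferred_remove_workqueue;

/* in local_init(): one unbound worker, at most one job in flight */
deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
if (!deferred_remove_workqueue)
        return -ENOMEM;

/* on last close with DMF_DEFERRED_REMOVE set: */
queue_work(deferred_remove_workqueue, &deferred_remove_work);

/* in local_exit(): destroy_workqueue() flushes anything pending */
destroy_workqueue(deferred_remove_workqueue);
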
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 8637d2ed7623..2e3cdcfa0a67 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -60,7 +60,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
jiffies_to_msecs(jiffies) -
(jiffies_to_msecs(timeout) - TIMEOUT));
- if (!(cmd->args[0] >> 7) & 0x01) {
+ if (!((cmd->args[0] >> 7) & 0x01)) {
ret = -ETIMEDOUT;
goto err_mutex_unlock;
}
@@ -485,20 +485,6 @@ static int si2168_init(struct dvb_frontend *fe)
if (ret)
goto err;
- cmd.args[0] = 0x05;
- cmd.args[1] = 0x00;
- cmd.args[2] = 0xaa;
- cmd.args[3] = 0x4d;
- cmd.args[4] = 0x56;
- cmd.args[5] = 0x40;
- cmd.args[6] = 0x00;
- cmd.args[7] = 0x00;
- cmd.wlen = 8;
- cmd.rlen = 1;
- ret = si2168_cmd_execute(s, &cmd);
- if (ret)
- goto err;
-
/* cold state - try to download firmware */
dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
KBUILD_MODNAME, si2168_ops.info.name);
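
The condition fix here (and the identical one in si2157.c below) is an operator-precedence repair: ! binds tighter than &, so the old expression parsed as (!(x >> 7)) & 0x01. For this u8 status byte the two forms happen to agree, but the parenthesized version states the intent, silences gcc's -Wlogical-not-parentheses, and stays correct if the shifted value can ever exceed one bit. A runnable illustration of where the forms diverge:

#include <stdio.h>

int main(void)
{
        int x = 0x100;  /* shifted value 2: low bit clear, but nonzero */

        printf("(!(x >> 7)) & 1 = %d\n", (!(x >> 7)) & 1);      /* 0 */
        printf("!((x >> 7) & 1) = %d\n", !((x >> 7) & 1));      /* 1 */
        return 0;
}
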
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 2a343e896f40..53f7f06ae343 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -22,7 +22,7 @@
#include <linux/firmware.h>
#include <linux/i2c-mux.h>
-#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw"
+#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw"
/* state struct */
struct si2168 {
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 522fe00f5eee..9619be5d4827 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -668,6 +668,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u8 mode, rolloff, pilot, inversion, div;
+ fe_modulation_t modulation;
dev_dbg(&priv->i2c->dev,
"%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -702,10 +703,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
switch (c->delivery_system) {
case SYS_DVBS:
+ modulation = QPSK;
rolloff = 0;
pilot = 2;
break;
case SYS_DVBS2:
+ modulation = c->modulation;
+
switch (c->rolloff) {
case ROLLOFF_20:
rolloff = 2;
@@ -750,7 +754,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
- c->modulation == TDA10071_MODCOD[i].modulation &&
+ modulation == TDA10071_MODCOD[i].modulation &&
c->fec_inner == TDA10071_MODCOD[i].fec) {
mode = TDA10071_MODCOD[i].val;
dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
@@ -834,10 +838,10 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
switch ((buf[1] >> 0) & 0x01) {
case 0:
- c->inversion = INVERSION_OFF;
+ c->inversion = INVERSION_ON;
break;
case 1:
- c->inversion = INVERSION_ON;
+ c->inversion = INVERSION_OFF;
break;
}
@@ -856,7 +860,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
if (ret)
goto error;
- c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0);
+ c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000;
return ret;
error:
diff --git a/drivers/media/dvb-frontends/tda10071_priv.h b/drivers/media/dvb-frontends/tda10071_priv.h
index 4baf14bfb65a..420486192736 100644
--- a/drivers/media/dvb-frontends/tda10071_priv.h
+++ b/drivers/media/dvb-frontends/tda10071_priv.h
@@ -55,6 +55,7 @@ static struct tda10071_modcod {
{ SYS_DVBS2, QPSK, FEC_8_9, 0x0a },
{ SYS_DVBS2, QPSK, FEC_9_10, 0x0b },
/* 8PSK */
+ { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 },
{ SYS_DVBS2, PSK_8, FEC_3_5, 0x0c },
{ SYS_DVBS2, PSK_8, FEC_2_3, 0x0d },
{ SYS_DVBS2, PSK_8, FEC_3_4, 0x0e },
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index e65c760e4e8b..0006d6bf8c18 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -179,7 +179,7 @@ static const struct v4l2_file_operations ts_fops =
.read = vb2_fop_read,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops ts_ioctl_ops = {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index a7ed16497903..1e4ec697fb10 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -269,6 +269,7 @@ err:
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
}
+ spin_unlock_irqrestore(&common->irqlock, flags);
return ret;
}
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 5bb085b19bcb..b431b58f39e3 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -233,6 +233,7 @@ err:
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
}
+ spin_unlock_irqrestore(&common->irqlock, flags);
return ret;
}
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 271a752cee54..fa4cc7b880aa 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -57,7 +57,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
jiffies_to_msecs(jiffies) -
(jiffies_to_msecs(timeout) - TIMEOUT));
- if (!(buf[0] >> 7) & 0x01) {
+ if (!((buf[0] >> 7) & 0x01)) {
ret = -ETIMEDOUT;
goto err_mutex_unlock;
} else {
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 021e4d35e4d7..7b9b75f60774 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -704,15 +704,41 @@ static int af9035_read_config(struct dvb_usb_device *d)
if (ret < 0)
goto err;
- if (tmp == 0x00)
- dev_dbg(&d->udev->dev,
- "%s: [%d]tuner not set, using default\n",
- __func__, i);
- else
+ dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
+ __func__, i, tmp);
+
+ /* tuner sanity check */
+ if (state->chip_type == 0x9135) {
+ if (state->chip_version == 0x02) {
+ /* IT9135 BX (v2) */
+ switch (tmp) {
+ case AF9033_TUNER_IT9135_60:
+ case AF9033_TUNER_IT9135_61:
+ case AF9033_TUNER_IT9135_62:
+ state->af9033_config[i].tuner = tmp;
+ break;
+ }
+ } else {
+ /* IT9135 AX (v1) */
+ switch (tmp) {
+ case AF9033_TUNER_IT9135_38:
+ case AF9033_TUNER_IT9135_51:
+ case AF9033_TUNER_IT9135_52:
+ state->af9033_config[i].tuner = tmp;
+ break;
+ }
+ }
+ } else {
+ /* AF9035 */
state->af9033_config[i].tuner = tmp;
+ }
- dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
- __func__, i, state->af9033_config[i].tuner);
+ if (state->af9033_config[i].tuner != tmp) {
+ dev_info(&d->udev->dev,
+ "%s: [%d] overriding tuner from %02x to %02x\n",
+ KBUILD_MODNAME, i, tmp,
+ state->af9033_config[i].tuner);
+ }
switch (state->af9033_config[i].tuner) {
case AF9033_TUNER_TUA9001:
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 2fd1c5e31a0f..339adce7c7a5 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x093a, 0x2620)},
{USB_DEVICE(0x093a, 0x2621)},
{USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
+ {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x2625)},
{USB_DEVICE(0x093a, 0x2626)},
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 0500c4175d5f..6bce01a674f9 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
}
/*=========================================================================*/
-/* bufffer bits */
+/* buffer bits */
/* function expects dev->io_mutex to be hold by caller */
int hdpvr_cancel_queue(struct hdpvr_device *dev)
@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_AUDIO_ENCODING:
if (dev->flags & HDPVR_FLAG_AC3_CAP) {
opt->audio_codec = ctrl->val;
- return hdpvr_set_audio(dev, opt->audio_input,
+ return hdpvr_set_audio(dev, opt->audio_input + 1,
opt->audio_codec);
}
return 0;
@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
V4L2_CID_MPEG_AUDIO_ENCODING,
ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
- 0x7, V4L2_MPEG_AUDIO_ENCODING_AAC);
+ 0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
V4L2_CID_MPEG_VIDEO_ENCODING,
V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 4ae54caadd03..ce1c9f5d9dee 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -610,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
aspect.denominator = 9;
} else if (ratio == 34) {
aspect.numerator = 4;
- aspect.numerator = 3;
+ aspect.denominator = 3;
} else if (ratio == 68) {
aspect.numerator = 15;
- aspect.numerator = 9;
+ aspect.denominator = 9;
} else {
aspect.numerator = hor_landscape + 99;
aspect.denominator = 100;
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index e4ec355704a6..a7543ba3e190 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -52,6 +52,11 @@
/* Atmel chips */
#define AT49BV640D 0x02de
#define AT49BV640DT 0x02db
+/* Sharp chips */
+#define LH28F640BFHE_PTTL90 0x00b0
+#define LH28F640BFHE_PBTL90 0x00b1
+#define LH28F640BFHE_PTTL70A 0x00b2
+#define LH28F640BFHE_PBTL70A 0x00b3
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -258,6 +263,36 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)
(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
};
+static int is_LH28F640BF(struct cfi_private *cfi)
+{
+ /* Sharp LH28F640BF Family */
+ if (cfi->mfr == CFI_MFR_SHARP && (
+ cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
+ cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
+ return 1;
+ return 0;
+}
+
+static void fixup_LH28F640BF(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+ /* Reset the Partition Configuration Register on LH28F640BF
+ * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
+ if (is_LH28F640BF(cfi)) {
+ printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
+ map_write(map, CMD(0x60), 0);
+ map_write(map, CMD(0x04), 0);
+
+ /* We have set one single partition thus
+ * Simultaneous Operations are not allowed */
+ printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
+ extp->FeatureSupport &= ~512;
+ }
+}
+
static void fixup_use_point(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
@@ -309,6 +344,8 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
+ { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
+ { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
{ 0, 0, NULL }
};
@@ -1649,6 +1686,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
initial_adr = adr;
cmd_adr = adr & ~(wbufsize-1);
+ /* Sharp LH28F640BF chips need the first address for the
+ * Page Buffer Program command. See Table 5 of
+ * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
+ if (is_LH28F640BF(cfi))
+ cmd_adr = adr;
+
/* Let's determine this according to the interleave only once */
write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
index 7df86948e6d4..b4f61c7fc161 100644
--- a/drivers/mtd/devices/elm.c
+++ b/drivers/mtd/devices/elm.c
@@ -475,6 +475,7 @@ static int elm_context_save(struct elm_info *info)
ELM_SYNDROME_FRAGMENT_1 + offset);
regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_0 + offset);
+ break;
default:
return -EINVAL;
}
@@ -520,6 +521,7 @@ static int elm_context_restore(struct elm_info *info)
regs->elm_syndrome_fragment_1[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
regs->elm_syndrome_fragment_0[i]);
+ break;
default:
return -EINVAL;
}
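
The elm save/restore fix adds the break that was missing from the last case, which otherwise fell straight through into "default: return -EINVAL", so even a successful context save reported failure. A runnable reduction of the bug (the case value is illustrative):

#include <stdio.h>

static int save_context(int bch_type)
{
        switch (bch_type) {
        case 8:
                /* ... save syndrome fragments ... */
                break;          /* without this: fall through to -EINVAL */
        default:
                return -22;     /* -EINVAL */
        }
        return 0;
}

int main(void)
{
        printf("save_context(8) = %d\n", save_context(8));      /* 0 */
        return 0;
}
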
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 41167e9e991e..4f3e80c68a26 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4047,8 +4047,10 @@ int nand_scan_tail(struct mtd_info *mtd)
ecc->layout->oobavail += ecc->layout->oobfree[i].length;
mtd->oobavail = ecc->layout->oobavail;
- /* ECC sanity check: warn noisily if it's too weak */
- WARN_ON(!nand_ecc_strength_good(mtd));
+ /* ECC sanity check: warn if it's too weak */
+ if (!nand_ecc_strength_good(mtd))
+ pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
+ mtd->name);
/*
* Set the number of read / write steps for one page depending on ECC
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index b04e7d059888..0431b46d9fd9 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -125,7 +125,7 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
parent = *p;
av = rb_entry(parent, struct ubi_ainf_volume, rb);
- if (vol_id < av->vol_id)
+ if (vol_id > av->vol_id)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -423,7 +423,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
pnum, err);
ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out;
- } else if (ret == UBI_IO_BITFLIPS)
+ } else if (err == UBI_IO_BITFLIPS)
scrub = 1;
/*
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3a451b6cd3d5..701f86cd5993 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4068,7 +4068,7 @@ static int bond_check_params(struct bond_params *params)
}
if (ad_select) {
- bond_opt_initstr(&newval, lacp_rate);
+ bond_opt_initstr(&newval, ad_select);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
&newval);
if (!valptr) {
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 824108cd9fd5..12430be6448a 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -287,7 +287,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
break;
}
- priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
+ priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
dev_info(&pdev->dev, "control memory is not used for raminit\n");
else
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index c83584a26713..5a1891faba8a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -339,7 +339,8 @@ static int xgbe_probe(struct platform_device *pdev)
/* Calculate the number of Tx and Rx rings to be created */
pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
pdata->hw_feat.tx_ch_cnt);
- if (netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count)) {
+ ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+ if (ret) {
dev_err(dev, "error setting real tx queue count\n");
goto err_io;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 141160ef249a..5776e503e4c5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -654,13 +654,13 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
- if (work_done < budget) {
+ if (work_done == 0) {
napi_complete(napi);
/* re-enable TX interrupt */
intrl2_1_mask_clear(ring->priv, BIT(ring->index));
}
- return work_done;
+ return 0;
}
static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
@@ -1254,28 +1254,17 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
usleep_range(1000, 2000);
}
-static inline int umac_reset(struct bcm_sysport_priv *priv)
+static inline void umac_reset(struct bcm_sysport_priv *priv)
{
- unsigned int timeout = 0;
u32 reg;
- int ret = 0;
-
- umac_writel(priv, 0, UMAC_CMD);
- while (timeout++ < 1000) {
- reg = umac_readl(priv, UMAC_CMD);
- if (!(reg & CMD_SW_RESET))
- break;
-
- udelay(1);
- }
-
- if (timeout == 1000) {
- dev_err(&priv->pdev->dev,
- "timeout waiting for MAC to come out of reset\n");
- ret = -ETIMEDOUT;
- }
- return ret;
+ reg = umac_readl(priv, UMAC_CMD);
+ reg |= CMD_SW_RESET;
+ umac_writel(priv, reg, UMAC_CMD);
+ udelay(10);
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_SW_RESET;
+ umac_writel(priv, reg, UMAC_CMD);
}
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
@@ -1303,11 +1292,7 @@ static int bcm_sysport_open(struct net_device *dev)
int ret;
/* Reset UniMAC */
- ret = umac_reset(priv);
- if (ret) {
- netdev_err(dev, "UniMAC reset failed\n");
- return ret;
- }
+ umac_reset(priv);
/* Flush TX and RX FIFOs at TOPCTRL level */
topctrl_flush(priv);
@@ -1589,12 +1574,6 @@ static int bcm_sysport_probe(struct platform_device *pdev)
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
dev->needed_headroom += sizeof(struct bcm_tsb);
- /* We are interfaced to a switch which handles the multicast
- * filtering for us, so we do not support programming any
- * multicast hash table in this Ethernet MAC.
- */
- dev->flags &= ~IFF_MULTICAST;
-
/* libphy will adjust the link state accordingly */
netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4cab09d3f807..8206a293e6b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -346,6 +346,7 @@ struct sw_tx_bd {
u8 flags;
/* Set on the first BD descriptor when there is a split BD */
#define BNX2X_TSO_SPLIT_BD (1<<0)
+#define BNX2X_HAS_SECOND_PBD (1<<1)
};
struct sw_rx_page {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 47c5814114e1..c43e7238de21 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -227,6 +227,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
+ /* Skip second parse bd... */
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
@@ -797,7 +803,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return;
}
- bnx2x_frag_free(fp, new_data);
+ if (new_data)
+ bnx2x_frag_free(fp, new_data);
drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
@@ -3888,6 +3895,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set encapsulation flag in start BD */
SET_FLAG(tx_start_bd->general_data,
ETH_TX_START_BD_TUNNEL_EXIST, 1);
+
+ tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
+
nbd++;
} else if (xmit_type & XMIT_CSUM) {
/* Set PBD in checksum offload case w/o encapsulation */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index bd0600cf7266..25eddd90f482 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -379,6 +379,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case PORT_FIBRE:
case PORT_DA:
+ case PORT_NONE:
if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
bp->port.supported[1] & SUPPORTED_FIBRE)) {
DP(BNX2X_MSG_ETHTOOL,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2887034523e0..6a8b1453a1b9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12937,7 +12937,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
* without the default SB.
* For VFs there is no default SB, then we return (index+1).
*/
- pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
index = control & PCI_MSIX_FLAGS_QSIZE;
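The one-character fix above matters because the MSI and MSI-X capabilities carry different Message Control layouts; only PCI_MSIX_FLAGS addresses the word that holds the MSI-X Table Size field. A sketch of reading the supported vector count from config space (assuming an MSI-X capable pdev):

u16 control;
unsigned int table_size;

/* Message Control sits 2 bytes into the MSI-X capability (PCI_MSIX_FLAGS) */
pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
/* the Table Size field encodes the supported vector count minus one */
table_size = (control & PCI_MSIX_FLAGS_QSIZE) + 1;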
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 5ba1cfbd60da..4e615debe472 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1149,6 +1149,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ if (skb_padto(skb, ETH_ZLEN)) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
/* set the SKB transmit checksum */
if (priv->desc_64b_en) {
ret = bcmgenet_put_tx_csum(dev, skb);
@@ -1408,13 +1413,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
if (cb->skb)
continue;
- /* set the DMA descriptor length once and for all
- * it will only change if we support dynamically sizing
- * priv->rx_buf_len, but we do not
- */
- dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
- priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
-
ret = bcmgenet_rx_refill(priv, cb);
if (ret)
break;
@@ -2535,14 +2533,17 @@ static int bcmgenet_probe(struct platform_device *pdev)
netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
- err = register_netdev(dev);
- if (err)
- goto err_clk_disable;
+ /* libphy will determine the link state */
+ netif_carrier_off(dev);
/* Turn off the main clock, WOL clock is handled separately */
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
+ err = register_netdev(dev);
+ if (err)
+ goto err;
+
return err;
err_clk_disable:
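The skb_padto() hunk above guards against runt frames. Note that skb_padto() frees the skb itself when it fails, so the only correct reaction is to report NETDEV_TX_OK and never touch the buffer again. The idiom, sketched in a hypothetical xmit handler:

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* hardware does not pad short frames: zero-extend to the
	 * 60-byte minimum (ETH_ZLEN) before mapping for DMA */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;	/* skb already freed, nothing to clean up */

	/* ... map the (now padded) skb and ring the doorbell ... */
	return NETDEV_TX_OK;
}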
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 0f117105fed1..e23c993b1362 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -331,9 +331,9 @@ struct bcmgenet_mib_counters {
#define EXT_ENERGY_DET_MASK (1 << 12)
#define EXT_RGMII_OOB_CTRL 0x0C
-#define RGMII_MODE_EN (1 << 0)
#define RGMII_LINK (1 << 4)
#define OOB_DISABLE (1 << 5)
+#define RGMII_MODE_EN (1 << 6)
#define ID_MODE_DIS (1 << 16)
#define EXT_GPHY_CTRL 0x1C
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 34a26e42f19d..1e187fb760f8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2902,7 +2902,7 @@ static int be_open(struct net_device *netdev)
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
be_enable_busy_poll(eqo);
- be_eq_notify(adapter, eqo->q.id, true, false, 0);
+ be_eq_notify(adapter, eqo->q.id, true, true, 0);
}
adapter->flags |= BE_FLAGS_NAPI_ENABLED;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index fab39e295441..36fc429298e3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -2990,11 +2990,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
if (ug_info->rxExtendedFiltering) {
size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
if (ug_info->largestexternallookupkeysize ==
- QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
if (ug_info->largestexternallookupkeysize ==
- QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a2db388cc31e..ee74f9536b31 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1481,6 +1481,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
s32 ret_val;
u16 i, rar_count = mac->rar_entry_count;
+ if ((hw->mac.type >= e1000_i210) &&
+ !(igb_get_flash_presence_i210(hw))) {
+ ret_val = igb_pll_workaround_i210(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Initialize identification LED */
ret_val = igb_id_led_init(hw);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 2a8bb35c2df2..217f8138851b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -46,14 +46,15 @@
#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
/* Physical Func Reset Done Indication */
-#define E1000_CTRL_EXT_PFRSTD 0x00004000
-#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
-#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
-#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
-#define E1000_CTRL_EXT_EIAME 0x01000000
-#define E1000_CTRL_EXT_IRCA 0x00000001
+#define E1000_CTRL_EXT_PFRSTD 0x00004000
+#define E1000_CTRL_EXT_SDLPE 0x00040000 /* SerDes Low Power Enable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_EIAME 0x01000000
+#define E1000_CTRL_EXT_IRCA 0x00000001
/* Interrupt delay cancellation */
/* Driver loaded bit for FW */
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
@@ -62,6 +63,7 @@
/* packet buffer parity error detection enabled */
/* descriptor FIFO parity error detection enable */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_PHYPDEN 0x00100000
#define E1000_I2CCMD_REG_ADDR_SHIFT 16
#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
#define E1000_I2CCMD_OPCODE_READ 0x08000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 89925e405849..ce55ea5d750c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -567,4 +567,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
/* These functions must be implemented by drivers */
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 337161f440dd..65d931669f81 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -834,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
}
return ret_val;
}
+
+/**
+ * igb_pll_workaround_i210
+ * @hw: pointer to the HW structure
+ *
+ * Works around an erratum in the PLL circuit where it occasionally
+ * provides the wrong clock frequency after power-up.
+ **/
+s32 igb_pll_workaround_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
+ u16 nvm_word, phy_word, pci_word, tmp_nvm;
+ int i;
+
+ /* Get and set needed register values */
+ wuc = rd32(E1000_WUC);
+ mdicnfg = rd32(E1000_MDICNFG);
+ reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
+ wr32(E1000_MDICNFG, reg_val);
+
+ /* Get data from NVM, or set default */
+ ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
+ &nvm_word);
+ if (ret_val)
+ nvm_word = E1000_INVM_DEFAULT_AL;
+ tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+ for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+ /* check current state directly from internal PHY */
+ igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
+ E1000_PHY_PLL_FREQ_REG), &phy_word);
+ if ((phy_word & E1000_PHY_PLL_UNCONF)
+ != E1000_PHY_PLL_UNCONF) {
+ ret_val = 0;
+ break;
+ } else {
+ ret_val = -E1000_ERR_PHY;
+ }
+ /* directly reset the internal PHY */
+ ctrl = rd32(E1000_CTRL);
+ wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+
+ wr32(E1000_WUC, 0);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
+ wr32(E1000_EEARBC_I210, reg_val);
+
+ igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ pci_word |= E1000_PCI_PMCSR_D3;
+ igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ usleep_range(1000, 2000);
+ pci_word &= ~E1000_PCI_PMCSR_D3;
+ igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
+ wr32(E1000_EEARBC_I210, reg_val);
+
+ /* restore WUC register */
+ wr32(E1000_WUC, wuc);
+ }
+ /* restore MDICNFG setting */
+ wr32(E1000_MDICNFG, mdicnfg);
+ return ret_val;
+}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 9f34976687ba..3442b6357d01 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -33,6 +33,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_pll_workaround_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -78,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
+/* PLL Defines */
+#define E1000_PCI_PMCSR 0x44
+#define E1000_PCI_PMCSR_D3 0x03
+#define E1000_MAX_PLL_TRIES 5
+#define E1000_PHY_PLL_UNCONF 0xFF
+#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
+#define E1000_PHY_PLL_FREQ_REG 0x000E
+#define E1000_INVM_DEFAULT_AL 0x202F
+#define E1000_INVM_AUTOLOAD 0x0A
+#define E1000_INVM_PLL_WO_VAL 0x0010
+
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 1cc4b1a7e597..f5ba4e4eafb9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -66,6 +66,7 @@
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f145adbb55ac..a9537ba7a5a0 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7215,6 +7215,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+
+ pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+
+ pci_write_config_word(adapter->pdev, reg, *value);
+}
+
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
@@ -7578,6 +7592,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
if (netif_running(netdev))
igb_close(netdev);
+ else
+ igb_reset(adapter);
igb_clear_interrupt_scheme(adapter);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 45beca17fa50..dadd9a5f6323 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1207,7 +1207,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
- if (l3_proto == swab16(ETH_P_IP))
+ if (l3_proto == htons(ETH_P_IP))
command |= MVNETA_TXD_IP_CSUM;
else
command |= MVNETA_TX_L3_IP6;
@@ -2529,7 +2529,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
if (phydev->speed == SPEED_1000)
val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
- else
+ else if (phydev->speed == SPEED_100)
val |= MVNETA_GMAC_CONFIG_MII_SPEED;
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
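The swab16() to htons() change is an endianness fix: swab16() swaps unconditionally, so the old comparison only matched on little-endian hosts, whereas htons() swaps exactly when host order differs from network order. Concretely:

/* ETH_P_IP is 0x0800 in host byte order.
 *
 *                    little-endian    big-endian
 * swab16(ETH_P_IP)       0x0008          0x0008   (always swapped)
 * htons(ETH_P_IP)        0x0008          0x0800   (network order)
 *
 * A protocol field held in network byte order must therefore be
 * compared against htons(ETH_P_IP) to be correct on both. */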
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 80f725228f5b..56022d647837 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
init_completion(&cq->free);
cq->irq = priv->eq_table.eq[cq->vector].irq;
- cq->irq_affinity_change = false;
-
return 0;
err_radix:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 4b2130760eed..82322b1c8411 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -128,11 +128,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
name);
}
+
}
} else {
cq->vector = (cq->ring + 1 + priv->port) %
mdev->dev->caps.num_comp_vectors;
}
+
+ cq->irq_desc =
+ irq_to_desc(mlx4_eq_get_irq(mdev->dev,
+ cq->vector));
} else {
/* For TX we use the same IRQ per
ring as we assigned for the RX */
@@ -187,8 +192,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (priv->mdev->dev->caps.comp_pool && cq->vector) {
- if (!cq->is_tx)
- irq_set_affinity_hint(cq->mcq.irq, NULL);
mlx4_release_eq(priv->mdev->dev, cq->vector);
}
cq->vector = 0;
@@ -204,6 +207,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
if (!cq->is_tx) {
napi_hash_del(&cq->napi);
synchronize_rcu();
+ irq_set_affinity_hint(cq->mcq.irq, NULL);
}
netif_napi_del(&cq->napi);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index fa1a069e14e6..68d763d2d030 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -417,6 +417,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
coal->tx_coalesce_usecs = priv->tx_usecs;
coal->tx_max_coalesced_frames = priv->tx_frames;
+ coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
+
coal->rx_coalesce_usecs = priv->rx_usecs;
coal->rx_max_coalesced_frames = priv->rx_frames;
@@ -426,6 +428,7 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
coal->rate_sample_interval = priv->sample_interval;
coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
+
return 0;
}
@@ -434,6 +437,9 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ if (!coal->tx_max_coalesced_frames_irq)
+ return -EINVAL;
+
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TARGET :
@@ -457,6 +463,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
priv->sample_interval = coal->rate_sample_interval;
priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+ priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
return mlx4_en_moderation_update(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7d4fb7bf2593..7345c43b019e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2336,7 +2336,7 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
__be16 current_port;
- if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return;
if (sa_family == AF_INET6)
@@ -2473,6 +2473,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
MLX4_WQE_CTRL_SOLICITED);
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
+ priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index d2d415732d99..5535862f27cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -40,6 +40,7 @@
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
+#include <linux/irq.h>
#include "mlx4_en.h"
@@ -782,6 +783,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
PKT_HASH_TYPE_L3);
skb_record_rx_queue(gro_skb, cq->ring);
+ skb_mark_napi_id(gro_skb, &cq->napi);
if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -896,16 +898,25 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
/* If we used up all the quota - we're probably not done yet... */
if (done == budget) {
+ int cpu_curr;
+ const struct cpumask *aff;
+
INC_PERF_COUNTER(priv->pstats.napi_quota);
- if (unlikely(cq->mcq.irq_affinity_change)) {
- cq->mcq.irq_affinity_change = false;
+
+ cpu_curr = smp_processor_id();
+ aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+
+ if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
+ /* The current CPU is not in the IRQ's affinity mask -
+ * the affinity has probably changed. Stop this NAPI
+ * poll and let it restart on the right CPU.
+ */
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
return 0;
}
} else {
/* Done for now */
- cq->mcq.irq_affinity_change = false;
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8be7483f8236..5045bab59633 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -351,9 +351,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-static int mlx4_en_process_tx_cq(struct net_device *dev,
- struct mlx4_en_cq *cq,
- int budget)
+static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +371,10 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
int factor = priv->cqe_factor;
u64 timestamp = 0;
int done = 0;
+ int budget = priv->tx_work_limit;
if (!priv->port_up)
- return 0;
+ return true;
index = cons_index & size_mask;
cqe = &buf[(index << factor) + factor];
@@ -447,7 +447,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
}
- return done;
+ return done < budget;
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -467,24 +467,16 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- int done;
+ int clean_complete;
- done = mlx4_en_process_tx_cq(dev, cq, budget);
+ clean_complete = mlx4_en_process_tx_cq(dev, cq);
+ if (!clean_complete)
+ return budget;
- /* If we used up all the quota - we're probably not done yet... */
- if (done < budget) {
- /* Done for now */
- cq->mcq.irq_affinity_change = false;
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
- return done;
- } else if (unlikely(cq->mcq.irq_affinity_change)) {
- cq->mcq.irq_affinity_change = false;
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
- return 0;
- }
- return budget;
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+
+ return 0;
}
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d954ec1eac17..2a004b347e1d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,11 +53,6 @@ enum {
MLX4_EQ_ENTRY_SIZE = 0x20
};
-struct mlx4_irq_notify {
- void *arg;
- struct irq_affinity_notify notify;
-};
-
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -1088,57 +1083,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
iounmap(priv->clr_base);
}
-static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct mlx4_irq_notify *n = container_of(notify,
- struct mlx4_irq_notify,
- notify);
- struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
- struct radix_tree_iter iter;
- void **slot;
-
- radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
- struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
-
- if (cq->irq == notify->irq)
- cq->irq_affinity_change = true;
- }
-}
-
-static void mlx4_release_irq_notifier(struct kref *ref)
-{
- struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
- notify.kref);
- kfree(n);
-}
-
-static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
- struct mlx4_dev *dev, int irq)
-{
- struct mlx4_irq_notify *irq_notifier = NULL;
- int err = 0;
-
- irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
- if (!irq_notifier) {
- mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
- irq);
- return;
- }
-
- irq_notifier->notify.irq = irq;
- irq_notifier->notify.notify = mlx4_irq_notifier_notify;
- irq_notifier->notify.release = mlx4_release_irq_notifier;
- irq_notifier->arg = priv;
- err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
- if (err) {
- kfree(irq_notifier);
- irq_notifier = NULL;
- mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
- }
-}
-
-
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1409,8 +1353,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
continue;
/* we don't want to break here */
}
- mlx4_assign_irq_notifier(priv, dev,
- priv->eq_table.eq[vec].irq);
eq_set_ci(&priv->eq_table.eq[vec], 1);
}
@@ -1427,6 +1369,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
}
EXPORT_SYMBOL(mlx4_assign_eq);
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return priv->eq_table.eq[vec].irq;
+}
+EXPORT_SYMBOL(mlx4_eq_get_irq);
+
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1438,9 +1388,6 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
Belonging to a legacy EQ*/
mutex_lock(&priv->msix_ctl.pool_lock);
if (priv->msix_ctl.pool_bm & 1ULL << i) {
- irq_set_affinity_notifier(
- priv->eq_table.eq[vec].irq,
- NULL);
free_irq(priv->eq_table.eq[vec].irq,
&priv->eq_table.eq[vec]);
priv->msix_ctl.pool_bm &= ~(1ULL << i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 0e15295bedd6..d72a5a894fc6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -126,6 +126,8 @@ enum {
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
MLX4_EN_NUM_UP)
+#define MLX4_EN_DEFAULT_TX_WORK 256
+
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44
#define MLX4_EN_RX_COAL_TIME 0x10
@@ -343,6 +345,7 @@ struct mlx4_en_cq {
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
spinlock_t poll_lock; /* protects from LLS/napi conflicts */
#endif /* CONFIG_NET_RX_BUSY_POLL */
+ struct irq_desc *irq_desc;
};
struct mlx4_en_port_profile {
@@ -542,6 +545,7 @@ struct mlx4_en_priv {
__be32 ctrl_flags;
u32 flags;
u8 num_tx_rings_p_up;
+ u32 tx_work_limit;
u32 tx_ring_num;
u32 rx_ring_num;
u32 rx_skb_size;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index ba0401d4af50..184c3615f479 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -94,6 +94,11 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
write_lock_irq(&table->lock);
err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
write_unlock_irq(&table->lock);
+ if (err) {
+ mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
+ mlx5_base_mkey(mr->key), err);
+ mlx5_core_destroy_mkey(dev, mr);
+ }
return err;
}
@@ -104,12 +109,22 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
struct mlx5_mr_table *table = &dev->priv.mr_table;
struct mlx5_destroy_mkey_mbox_in in;
struct mlx5_destroy_mkey_mbox_out out;
+ struct mlx5_core_mr *deleted_mr;
unsigned long flags;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
+ write_lock_irqsave(&table->lock, flags);
+ deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
+ write_unlock_irqrestore(&table->lock, flags);
+ if (!deleted_mr) {
+ mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
+ mlx5_base_mkey(mr->key));
+ return -ENOENT;
+ }
+
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
@@ -119,10 +134,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
- write_lock_irqsave(&table->lock, flags);
- radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
- write_unlock_irqrestore(&table->lock, flags);
-
return err;
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
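Both mr.c hunks enforce a single ordering rule: an object must not be reachable through the lookup table while its hardware state is being created or destroyed. The destroy path unpublishes first; the create path tears the mkey back down when publishing fails. A condensed sketch of the destroy side, assuming a hypothetical example_table (rwlock_t lock plus radix tree) and example_hw_destroy():

static int example_destroy(struct example_table *table, unsigned long key)
{
	unsigned long flags;
	void *obj;

	write_lock_irqsave(&table->lock, flags);
	obj = radix_tree_delete(&table->tree, key);
	write_unlock_irqrestore(&table->lock, flags);
	if (!obj)
		return -ENOENT;	/* never published, or already torn down */

	/* no lookup can reach the object any more; destroying is race-free */
	return example_hw_destroy(obj);
}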
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index be425ad5e824..61623e9af574 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -538,6 +538,7 @@ enum rtl_register_content {
MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
+ Rdy_to_L23 = (1 << 1), /* L23 Enable */
Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
/* Config4 register */
@@ -4239,6 +4240,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
@@ -4897,6 +4900,21 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
PCI_EXP_LNKCTL_CLKREQ_EN);
}
+static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ u8 data;
+
+ data = RTL_R8(Config3);
+
+ if (enable)
+ data |= Rdy_to_L23;
+ else
+ data &= ~Rdy_to_L23;
+
+ RTL_W8(Config3, data);
+}
+
#define R8168_CPCMD_QUIRK_MASK (\
EnableBist | \
Mac_dbgo_oe | \
@@ -5246,6 +5264,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
};
rtl_hw_start_8168f(tp);
+ rtl_pcie_state_l2l3_enable(tp, false);
rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
@@ -5284,6 +5303,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+
+ rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5536,6 +5557,8 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+
+ rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
@@ -5571,6 +5594,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
+
+ rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8106(struct rtl8169_private *tp)
@@ -5583,6 +5608,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
+
+ rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8101(struct net_device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b3e148ef5683..9d3748361a1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -320,11 +320,8 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
{
- u32 value;
-
- value = readl(ioaddr + GMAC_AN_CTRL);
/* auto negotiation enable and External Loopback enable */
- value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+ u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
if (restart)
value |= GMAC_AN_CTRL_RAN;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7e6628a91514..1e2bcf5f89e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -145,7 +145,7 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
x->rx_msg_type_delay_req++;
else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
x->rx_msg_type_delay_resp++;
- else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+ else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
x->rx_msg_type_pdelay_req++;
else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
x->rx_msg_type_pdelay_resp++;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1c24a8f368bd..d813bfb1a847 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -610,6 +610,13 @@ static int __vnet_tx_trigger(struct vnet_port *port)
return err;
}
+static inline bool port_is_up(struct vnet_port *vnet)
+{
+ struct vio_driver_state *vio = &vnet->vio;
+
+ return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
unsigned int hash = vnet_hashfn(skb->data);
@@ -617,14 +624,19 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
struct vnet_port *port;
hlist_for_each_entry(port, hp, hash) {
+ if (!port_is_up(port))
+ continue;
if (ether_addr_equal(port->raddr, skb->data))
return port;
}
- port = NULL;
- if (!list_empty(&vp->port_list))
- port = list_entry(vp->port_list.next, struct vnet_port, list);
-
- return port;
+ list_for_each_entry(port, &vp->port_list, list) {
+ if (!port->switch_port)
+ continue;
+ if (!port_is_up(port))
+ continue;
+ return port;
+ }
+ return NULL;
}
struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
@@ -1083,6 +1095,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
return vp;
}
+static void vnet_cleanup(void)
+{
+ struct vnet *vp;
+ struct net_device *dev;
+
+ mutex_lock(&vnet_list_mutex);
+ while (!list_empty(&vnet_list)) {
+ vp = list_first_entry(&vnet_list, struct vnet, list);
+ list_del(&vp->list);
+ dev = vp->dev;
+ /* vio_unregister_driver() should have cleaned up port_list */
+ BUG_ON(!list_empty(&vp->port_list));
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ mutex_unlock(&vnet_list_mutex);
+}
+
static const char *local_mac_prop = "local-mac-address";
static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1270,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
kfree(port);
- unregister_netdev(vp->dev);
}
return 0;
}
@@ -1268,6 +1297,7 @@ static int __init vnet_init(void)
static void __exit vnet_exit(void)
{
vio_unregister_driver(&vnet_port_driver);
+ vnet_cleanup();
}
module_init(vnet_init);
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index eb78203cd58e..2aa57270838f 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -291,7 +291,11 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
static void dfx_rcv_queue_process(DFX_board_t *bp);
+#ifdef DYNAMIC_BUFFERS
static void dfx_rcv_flush(DFX_board_t *bp);
+#else
+static inline void dfx_rcv_flush(DFX_board_t *bp) {}
+#endif
static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
struct net_device *dev);
@@ -2849,7 +2853,7 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
* Align an sk_buff to a boundary power of 2
*
*/
-
+#ifdef DYNAMIC_BUFFERS
static void my_skb_align(struct sk_buff *skb, int n)
{
unsigned long x = (unsigned long)skb->data;
@@ -2859,7 +2863,7 @@ static void my_skb_align(struct sk_buff *skb, int n)
skb_reserve(skb, v - x);
}
-
+#endif
/*
* ================
@@ -3074,10 +3078,7 @@ static void dfx_rcv_queue_process(
break;
}
else {
-#ifndef DYNAMIC_BUFFERS
- if (! rx_in_place)
-#endif
- {
+ if (!rx_in_place) {
/* Receive buffer allocated, pass receive packet up */
skb_copy_to_linear_data(skb,
@@ -3453,10 +3454,6 @@ static void dfx_rcv_flush( DFX_board_t *bp )
}
}
-#else
-static inline void dfx_rcv_flush( DFX_board_t *bp )
-{
-}
#endif /* DYNAMIC_BUFFERS */
/*
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4ed38eaecea8..d97d5f39a04e 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -378,8 +378,10 @@ static int netvsc_init_buf(struct hv_device *device)
net_device->send_section_map =
kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
- if (net_device->send_section_map == NULL)
+ if (net_device->send_section_map == NULL) {
+ ret = -ENOMEM;
goto cleanup;
+ }
goto exit;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 6a999e6814a0..9408157a246c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1323,15 +1323,15 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
{
struct dp83640_private *dp83640 = phydev->priv;
- if (!dp83640->hwts_rx_en)
- return false;
-
if (is_status_frame(skb, type)) {
decode_status_frame(dp83640, skb);
kfree_skb(skb);
return true;
}
+ if (!dp83640->hwts_rx_en)
+ return false;
+
SKB_PTP_TYPE(skb) = type;
skb_queue_tail(&dp83640->rx_queue, skb);
schedule_work(&dp83640->ts_work);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e58aa54484c..203651ebccb0 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -187,6 +187,50 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
return d ? to_mii_bus(d) : NULL;
}
EXPORT_SYMBOL(of_mdio_find_bus);
+
+/* Walk the list of subnodes of an MDIO bus and look for a node whose 'reg'
+ * property matches the phy's address. If found, set the of_node pointer for
+ * the phy. This allows auto-probed phy devices to be supplied with information
+ * passed in via DT.
+ */
+static void of_mdiobus_link_phydev(struct mii_bus *mdio,
+ struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct device_node *child;
+
+ if (dev->of_node || !mdio->dev.of_node)
+ return;
+
+ for_each_available_child_of_node(mdio->dev.of_node, child) {
+ int addr;
+ int ret;
+
+ ret = of_property_read_u32(child, "reg", &addr);
+ if (ret < 0) {
+ dev_err(dev, "%s has invalid PHY address\n",
+ child->full_name);
+ continue;
+ }
+
+ /* A PHY must have a reg property in the range [0-31] */
+ if (addr >= PHY_MAX_ADDR) {
+ dev_err(dev, "%s PHY address %i is too large\n",
+ child->full_name, addr);
+ continue;
+ }
+
+ if (addr == phydev->addr) {
+ dev->of_node = child;
+ return;
+ }
+ }
+}
+#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
+static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
+ struct phy_device *phydev)
+{
+}
#endif
/**
@@ -211,6 +255,7 @@ int mdiobus_register(struct mii_bus *bus)
bus->dev.parent = bus->parent;
bus->dev.class = &mdio_bus_class;
+ bus->dev.driver = bus->parent->driver;
bus->dev.groups = NULL;
dev_set_name(&bus->dev, "%s", bus->id);
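The of_mdiobus_link_phydev() hunk also shows the standard kernel pattern for optional subsystems: the real implementation is compiled under IS_ENABLED(CONFIG_OF_MDIO) and a static inline no-op stands in otherwise, so callers never need an #ifdef of their own. Schematically, with a hypothetical helper name:

#if IS_ENABLED(CONFIG_OF_MDIO)
void example_of_link(struct mii_bus *mdio, struct phy_device *phydev);
#else
/* no DT support built in: this compiles away to nothing at the call site */
static inline void example_of_link(struct mii_bus *mdio,
				   struct phy_device *phydev)
{
}
#endif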
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 35d753d22f78..22c57be4dfa0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -355,7 +355,7 @@ int phy_device_register(struct phy_device *phydev)
phydev->bus->phy_map[phydev->addr] = phydev;
/* Run all of the fixups for this PHY */
- err = phy_init_hw(phydev);
+ err = phy_scan_fixups(phydev);
if (err) {
pr_err("PHY %d failed to initialize\n", phydev->addr);
goto out;
@@ -575,6 +575,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface)
{
struct device *d = &phydev->dev;
+ struct module *bus_module;
int err;
/* Assume that if there is no driver, that it doesn't
@@ -599,6 +600,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
return -EBUSY;
}
+ /* Increment the bus module reference count */
+ bus_module = phydev->bus->dev.driver ?
+ phydev->bus->dev.driver->owner : NULL;
+ if (!try_module_get(bus_module)) {
+ dev_err(&dev->dev, "failed to get the bus module\n");
+ return -EIO;
+ }
+
phydev->attached_dev = dev;
dev->phydev = phydev;
@@ -664,6 +673,10 @@ EXPORT_SYMBOL(phy_attach);
void phy_detach(struct phy_device *phydev)
{
int i;
+
+ if (phydev->bus->dev.driver)
+ module_put(phydev->bus->dev.driver->owner);
+
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
phy_suspend(phydev);
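The attach/detach pair above pins the MDIO bus driver's module for as long as a PHY is in use, so the bus code cannot be unloaded underneath an attached device. The idiom, sketched with a generic owner pointer:

/* attach path: take a reference on the providing module */
struct module *owner = bus->dev.driver ? bus->dev.driver->owner : NULL;

if (!try_module_get(owner))	/* a NULL owner always succeeds */
	return -EIO;		/* module is being unloaded, refuse */

/* ... device in use ... */

module_put(owner);		/* detach path: drop the reference */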
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 91d6c1272fcf..d5b77ef3a210 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -539,7 +539,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
{
struct sock_fprog uprog;
struct sock_filter *code = NULL;
- int len, err;
+ int len;
if (copy_from_user(&uprog, arg, sizeof(uprog)))
return -EFAULT;
@@ -554,12 +554,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
if (IS_ERR(code))
return PTR_ERR(code);
- err = sk_chk_filter(code, uprog.len);
- if (err) {
- kfree(code);
- return err;
- }
-
*p = code;
return uprog.len;
}
@@ -763,10 +757,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
};
ppp_lock(ppp);
- if (ppp->pass_filter)
+ if (ppp->pass_filter) {
sk_unattached_filter_destroy(ppp->pass_filter);
- err = sk_unattached_filter_create(&ppp->pass_filter,
- &fprog);
+ ppp->pass_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&ppp->pass_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
ppp_unlock(ppp);
}
@@ -784,10 +783,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
};
ppp_lock(ppp);
- if (ppp->active_filter)
+ if (ppp->active_filter) {
sk_unattached_filter_destroy(ppp->active_filter);
- err = sk_unattached_filter_create(&ppp->active_filter,
- &fprog);
+ ppp->active_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&ppp->active_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
ppp_unlock(ppp);
}
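Both filter ioctls now share a shape worth isolating: destroy the old filter and NULL the pointer before attempting to install a replacement, and treat an empty program as "clear the slot" rather than as an error. A condensed sketch using a hypothetical replace_filter() helper and the same sk_unattached_filter_* API as the hunks above:

static int replace_filter(struct sk_filter **slot, struct sock_fprog_kern *prog)
{
	if (*slot) {
		sk_unattached_filter_destroy(*slot);
		*slot = NULL;	/* no dangling pointer if create fails below */
	}
	if (!prog->filter)
		return 0;	/* empty program simply clears the filter */
	return sk_unattached_filter_create(slot, prog);
}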
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2ea7efd11857..6c9c16d76935 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
dev->hard_header_len);
- po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
+ po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
po->chan.ops = &pppoe_chan_ops;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9ea4bfe5d318..2a32d9167d3b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -341,6 +341,22 @@ next_desc:
usb_driver_release_interface(driver, info->data);
return -ENODEV;
}
+
+ /* Some devices don't initialise properly; in particular
+ * the packet filter is not reset on every kind of reset.
+ * Set it to a sane initial value so such devices start
+ * in a known state.
+ */
+ usb_control_msg(dev->udev,
+ usb_sndctrlpipe(dev->udev, 0),
+ USB_CDC_SET_ETHERNET_PACKET_FILTER,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
+ intf->cur_altsetting->desc.bInterfaceNumber,
+ NULL,
+ 0,
+ USB_CTRL_SET_TIMEOUT
+ );
return 0;
bad_desc:
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index a3a05869309d..a4272ed62da8 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -258,10 +258,8 @@ struct hso_serial {
* so as not to drop characters on the floor.
*/
int curr_rx_urb_idx;
- u16 curr_rx_urb_offset;
u8 rx_urb_filled[MAX_RX_URBS];
struct tasklet_struct unthrottle_tasklet;
- struct work_struct retry_unthrottle_workqueue;
};
struct hso_device {
@@ -1252,14 +1250,6 @@ static void hso_unthrottle(struct tty_struct *tty)
tasklet_hi_schedule(&serial->unthrottle_tasklet);
}
-static void hso_unthrottle_workfunc(struct work_struct *work)
-{
- struct hso_serial *serial =
- container_of(work, struct hso_serial,
- retry_unthrottle_workqueue);
- hso_unthrottle_tasklet(serial);
-}
-
/* open the requested serial port */
static int hso_serial_open(struct tty_struct *tty, struct file *filp)
{
@@ -1295,8 +1285,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
tasklet_init(&serial->unthrottle_tasklet,
(void (*)(unsigned long))hso_unthrottle_tasklet,
(unsigned long)serial);
- INIT_WORK(&serial->retry_unthrottle_workqueue,
- hso_unthrottle_workfunc);
result = hso_start_serial_device(serial->parent, GFP_KERNEL);
if (result) {
hso_stop_serial_device(serial->parent);
@@ -1345,7 +1333,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
if (!usb_gone)
hso_stop_serial_device(serial->parent);
tasklet_kill(&serial->unthrottle_tasklet);
- cancel_work_sync(&serial->retry_unthrottle_workqueue);
}
if (!usb_gone)
@@ -2013,8 +2000,7 @@ static void ctrl_callback(struct urb *urb)
static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
{
struct tty_struct *tty;
- int write_length_remaining = 0;
- int curr_write_len;
+ int count;
/* Sanity check */
if (urb == NULL || serial == NULL) {
@@ -2024,29 +2010,28 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
tty = tty_port_tty_get(&serial->port);
+ if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+ tty_kref_put(tty);
+ return -1;
+ }
+
/* Push data to tty */
- write_length_remaining = urb->actual_length -
- serial->curr_rx_urb_offset;
D1("data to push to tty");
- while (write_length_remaining) {
- if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
- tty_kref_put(tty);
- return -1;
- }
- curr_write_len = tty_insert_flip_string(&serial->port,
- urb->transfer_buffer + serial->curr_rx_urb_offset,
- write_length_remaining);
- serial->curr_rx_urb_offset += curr_write_len;
- write_length_remaining -= curr_write_len;
+ count = tty_buffer_request_room(&serial->port, urb->actual_length);
+ if (count >= urb->actual_length) {
+ tty_insert_flip_string(&serial->port, urb->transfer_buffer,
+ urb->actual_length);
tty_flip_buffer_push(&serial->port);
+ } else {
+ dev_warn(&serial->parent->usb->dev,
+ "dropping data, %d bytes lost\n", urb->actual_length);
}
+
tty_kref_put(tty);
- if (write_length_remaining == 0) {
- serial->curr_rx_urb_offset = 0;
- serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
- }
- return write_length_remaining;
+ serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
+
+ return 0;
}
@@ -2217,7 +2202,6 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
}
}
serial->curr_rx_urb_idx = 0;
- serial->curr_rx_urb_offset = 0;
if (serial->tx_urb)
usb_kill_urb(serial->tx_urb);
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 5d95a13dbe2a..735f7dadb9a0 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -194,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
.driver_info = (unsigned long)&huawei_cdc_ncm_info,
},
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
+ .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+ },
/* Terminating entry */
{
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index cf62d7e8329f..22756db53dca 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -667,6 +667,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
@@ -741,6 +742,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
+ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
@@ -756,6 +758,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */
{QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
+ {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 25431965a625..3eab74c7c554 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -282,7 +282,7 @@
/* USB_DEV_STAT */
#define STAT_SPEED_MASK 0x0006
#define STAT_SPEED_HIGH 0x0000
-#define STAT_SPEED_FULL 0x0001
+#define STAT_SPEED_FULL 0x0002
/* USB_TX_AGG */
#define TX_AGG_MAX_THRESHOLD 0x03
@@ -1359,7 +1359,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
struct sk_buff_head seg_list;
struct sk_buff *segs, *nskb;
- features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+ features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
segs = skb_gso_segment(skb, features);
if (IS_ERR(segs) || !segs)
goto drop;
@@ -2292,9 +2292,8 @@ static void r8152b_exit_oob(struct r8152 *tp)
/* rx share fifo credit full threshold */
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT);
- ocp_data &= STAT_SPEED_MASK;
- if (ocp_data == STAT_SPEED_FULL) {
+ if (tp->udev->speed == USB_SPEED_FULL ||
+ tp->udev->speed == USB_SPEED_LOW) {
/* rx share fifo credit near full threshold */
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
RXFIFO_THR2_FULL);
@@ -3204,8 +3203,13 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev,
struct r8152 *tp = netdev_priv(dev);
struct tally_counter tally;
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
+ usb_autopm_put_interface(tp->intf);
+
data[0] = le64_to_cpu(tally.tx_packets);
data[1] = le64_to_cpu(tally.rx_packets);
data[2] = le64_to_cpu(tally.tx_errors);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 424db65e4396..d07bf4cb893f 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1714,6 +1714,18 @@ static int smsc95xx_resume(struct usb_interface *intf)
return ret;
}
+static int smsc95xx_reset_resume(struct usb_interface *intf)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ int ret;
+
+ ret = smsc95xx_reset(dev);
+ if (ret < 0)
+ return ret;
+
+ return smsc95xx_resume(intf);
+}
+
static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
{
skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -2004,7 +2016,7 @@ static struct usb_driver smsc95xx_driver = {
.probe = usbnet_probe,
.suspend = smsc95xx_suspend,
.resume = smsc95xx_resume,
- .reset_resume = smsc95xx_resume,
+ .reset_resume = smsc95xx_reset_resume,
.disconnect = usbnet_disconnect,
.disable_hub_initiated_lpm = 1,
.supports_autosuspend = 1,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ade33ef82823..9f79192c9aa0 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -339,7 +339,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
ndm->ndm_state = fdb->state;
ndm->ndm_ifindex = vxlan->dev->ifindex;
ndm->ndm_flags = fdb->flags;
- ndm->ndm_type = NDA_DST;
+ ndm->ndm_type = RTN_UNICAST;
if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
goto nla_put_failure;
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 93ace042d0aa..1f041271f7fe 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2363,7 +2363,7 @@ static char *type_strings[] = {
"FarSync TE1"
};
-static void
+static int
fst_init_card(struct fst_card_info *card)
{
int i;
@@ -2374,24 +2374,21 @@ fst_init_card(struct fst_card_info *card)
* we'll have to revise it in some way then.
*/
for (i = 0; i < card->nports; i++) {
- err = register_hdlc_device(card->ports[i].dev);
- if (err < 0) {
- int j;
+ err = register_hdlc_device(card->ports[i].dev);
+ if (err < 0) {
pr_err("Cannot register HDLC device for port %d (errno %d)\n",
- i, -err);
- for (j = i; j < card->nports; j++) {
- free_netdev(card->ports[j].dev);
- card->ports[j].dev = NULL;
- }
- card->nports = i;
- break;
- }
+ i, -err);
+ while (i--)
+ unregister_hdlc_device(card->ports[i].dev);
+ return err;
+ }
}
pr_info("%s-%s: %s IRQ%d, %d ports\n",
port_to_dev(&card->ports[0])->name,
port_to_dev(&card->ports[card->nports - 1])->name,
type_strings[card->type], card->irq, card->nports);
+ return 0;
}
static const struct net_device_ops fst_ops = {
@@ -2447,15 +2444,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Try to enable the device */
if ((err = pci_enable_device(pdev)) != 0) {
pr_err("Failed to enable card. Err %d\n", -err);
- kfree(card);
- return err;
+ goto enable_fail;
}
if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
pr_err("Failed to allocate regions. Err %d\n", -err);
- pci_disable_device(pdev);
- kfree(card);
- return err;
+ goto regions_fail;
}
/* Get virtual addresses of memory regions */
@@ -2464,30 +2458,21 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
card->phys_ctlmem = pci_resource_start(pdev, 3);
if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
pr_err("Physical memory remap failed\n");
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- kfree(card);
- return -ENODEV;
+ err = -ENODEV;
+ goto ioremap_physmem_fail;
}
if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
pr_err("Control memory remap failed\n");
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->mem);
- kfree(card);
- return -ENODEV;
+ err = -ENODEV;
+ goto ioremap_ctlmem_fail;
}
dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
/* Register the interrupt handler */
if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
pr_err("Unable to register interrupt %d\n", card->irq);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->ctlmem);
- iounmap(card->mem);
- kfree(card);
- return -ENODEV;
+ err = -ENODEV;
+ goto irq_fail;
}
/* Record info we need */
@@ -2513,13 +2498,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
while (i--)
free_netdev(card->ports[i].dev);
pr_err("FarSync: out of memory\n");
- free_irq(card->irq, card);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->ctlmem);
- iounmap(card->mem);
- kfree(card);
- return -ENODEV;
+ err = -ENOMEM;
+ goto hdlcdev_fail;
}
card->ports[i].dev = dev;
card->ports[i].card = card;
@@ -2565,9 +2545,16 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, card);
/* Remainder of card setup */
+ if (no_of_cards_added >= FST_MAX_CARDS) {
+ pr_err("FarSync: too many cards\n");
+ err = -ENOMEM;
+ goto card_array_fail;
+ }
fst_card_array[no_of_cards_added] = card;
card->card_no = no_of_cards_added++; /* Record instance and bump it */
- fst_init_card(card);
+ err = fst_init_card(card);
+ if (err)
+ goto init_card_fail;
if (card->family == FST_FAMILY_TXU) {
/*
* Allocate a dma buffer for transmit and receives
@@ -2577,29 +2564,46 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
&card->rx_dma_handle_card);
if (card->rx_dma_handle_host == NULL) {
pr_err("Could not allocate rx dma buffer\n");
- fst_disable_intr(card);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->ctlmem);
- iounmap(card->mem);
- kfree(card);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto rx_dma_fail;
}
card->tx_dma_handle_host =
pci_alloc_consistent(card->device, FST_MAX_MTU,
&card->tx_dma_handle_card);
if (card->tx_dma_handle_host == NULL) {
pr_err("Could not allocate tx dma buffer\n");
- fst_disable_intr(card);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- iounmap(card->ctlmem);
- iounmap(card->mem);
- kfree(card);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto tx_dma_fail;
}
}
return 0; /* Success */
+
+tx_dma_fail:
+ pci_free_consistent(card->device, FST_MAX_MTU,
+ card->rx_dma_handle_host,
+ card->rx_dma_handle_card);
+rx_dma_fail:
+ fst_disable_intr(card);
+ for (i = 0 ; i < card->nports ; i++)
+ unregister_hdlc_device(card->ports[i].dev);
+init_card_fail:
+ fst_card_array[card->card_no] = NULL;
+card_array_fail:
+ for (i = 0 ; i < card->nports ; i++)
+ free_netdev(card->ports[i].dev);
+hdlcdev_fail:
+ free_irq(card->irq, card);
+irq_fail:
+ iounmap(card->ctlmem);
+ioremap_ctlmem_fail:
+ iounmap(card->mem);
+ioremap_physmem_fail:
+ pci_release_regions(pdev);
+regions_fail:
+ pci_disable_device(pdev);
+enable_fail:
+ kfree(card);
+ return err;
}
/*
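
The fst_add_one() conversion above replaces six copies of the same cleanup sequence with a single unwind ladder: each failure jumps to the label just below the last resource that still needs releasing, and the labels then run in reverse acquisition order. A minimal, self-contained C sketch of the pattern (resource names here are illustrative, not from the driver):

    #include <stdlib.h>

    static char *a, *b;			/* resources retained on success */

    static int setup(void)
    {
    	int err = -1;

    	a = malloc(16);
    	if (!a)
    		goto a_fail;		/* nothing acquired yet */
    	b = malloc(32);
    	if (!b)
    		goto b_fail;		/* must release a, not b */
    	return 0;			/* success skips the ladder */

    b_fail:
    	free(a);			/* unwind in reverse acquisition order */
    a_fail:
    	return err;
    }

    int main(void)
    {
    	return setup() ? 1 : 0;
    }

The payoff shows in the hunk above: a new failure point costs one label, not an edit to every earlier error return.
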
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 5895f1978691..fa9fdfa128c1 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
{
struct x25_asy *sl = netdev_priv(dev);
unsigned char *xbuff, *rbuff;
- int len = 2 * newmtu;
+ int len;
+ if (newmtu > 65534)
+ return -EINVAL;
+
+ len = 2 * newmtu;
xbuff = kmalloc(len + 4, GFP_ATOMIC);
rbuff = kmalloc(len + 4, GFP_ATOMIC);
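
The guard added here bounds newmtu before len = 2 * newmtu is computed and handed to kmalloc(); without it, a huge MTU yields a nonsensical allocation size. A small sketch of the same validate-then-derive ordering, with the upper bound taken from the patch (the non-positive check is extra belt-and-braces here):

    #include <stdio.h>

    static int buffer_len_for_mtu(int newmtu)
    {
    	int len;

    	if (newmtu <= 0 || newmtu > 65534)
    		return -1;		/* reject before any arithmetic */
    	len = 2 * newmtu;		/* now safely within int range */
    	return len + 4;			/* the size passed to the allocator */
    }

    int main(void)
    {
    	printf("%d\n", buffer_len_for_mtu(1500));	/* 3004 */
    	printf("%d\n", buffer_len_for_mtu(70000));	/* -1 */
    	return 0;
    }
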
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 82017f56e661..e6c56c5bb0f6 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -795,7 +795,11 @@ int ath10k_core_start(struct ath10k *ar)
if (status)
goto err_htc_stop;
- ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1;
+ else
+ ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+
INIT_LIST_HEAD(&ar->arvifs);
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 6c102b1312ff..eebc860c3655 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -312,7 +312,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
- bool corrupted = false;
lockdep_assert_held(&htt->rx_ring.lock);
@@ -439,9 +438,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
- if (msdu_chaining && !last_msdu)
- corrupted = true;
-
if (last_msdu) {
msdu->next = NULL;
break;
@@ -457,20 +453,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_chaining = -1;
/*
- * Apparently FW sometimes reports weird chained MSDU sequences with
- * more than one rx descriptor. This seems like a bug but needs more
- * analyzing. For the time being fix it by dropping such sequences to
- * avoid blowing up the host system.
- */
- if (corrupted) {
- ath10k_warn("failed to pop chained msdus, dropping\n");
- ath10k_htt_rx_free_msdu_chain(*head_msdu);
- *head_msdu = NULL;
- *tail_msdu = NULL;
- msdu_chaining = -EINVAL;
- }
-
- /*
* Don't refill the ring yet.
*
* First, the elements popped here are still in use - it is not
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 66acb2cbd9df..7c28cb55610b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -887,6 +887,15 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
tx_info = IEEE80211_SKB_CB(skb);
tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
+
+ /*
+ * No aggregation session is running, but there may be frames
+ * from a previous session or a failed attempt in the queue.
+ * Send them out as normal data frames
+ */
+ if (!tid->active)
+ tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
bf->bf_state.bf_type = 0;
return bf;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 6db51a666f61..d06fcb05adf2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1184,8 +1184,6 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->bus_priv.usb = bus_pub;
dev_set_drvdata(dev, bus);
bus->ops = &brcmf_usb_bus_ops;
- bus->chip = bus_pub->devid;
- bus->chiprev = bus_pub->chiprev;
bus->proto_type = BRCMF_PROTO_BCDC;
bus->always_use_fws_queue = true;
@@ -1194,6 +1192,9 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
if (ret)
goto fail;
}
+ bus->chip = bus_pub->devid;
+ bus->chiprev = bus_pub->chiprev;
+
/* request firmware here */
brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
brcmf_usb_probe_phase2);
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index ed50de6362ed..6dc5dd3ced44 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1068,13 +1068,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
/* recalculate basic rates */
iwl_calc_basic_rates(priv, ctx);
- /*
- * force CTS-to-self frames protection if RTS-CTS is not preferred
- * one aggregation protection method
- */
- if (!priv->hw_params.use_rts_for_aggregation)
- ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
-
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -1480,11 +1473,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
else
ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
- if (bss_conf->use_cts_prot)
- ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
- else
- ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
-
memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
if (vif->type == NL80211_IFTYPE_AP ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 0aa7c0085c9f..b1a33322b9ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -88,6 +88,7 @@
* P2P client interfaces simultaneously if they are in different bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in same bindings.
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 8b5302777632..8b79081d4885 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -667,10 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
if (vif->bss_conf.qos)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
- if (vif->bss_conf.use_cts_prot) {
+ if (vif->bss_conf.use_cts_prot)
cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
- cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
- }
+
IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
vif->bss_conf.use_cts_prot,
vif->bss_conf.ht_operation_mode);
@@ -1073,8 +1072,12 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
/* Fill the common data for all mac context types */
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
- /* Also enable probe requests to pass */
- cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+ /*
+	 * Pass probe requests and beacons from other APs (needed
+	 * for HT protection).
+ */
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
+ MAC_FILTER_IN_BEACON);
/* Fill the data specific for ap mode */
iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
@@ -1095,6 +1098,13 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
/* Fill the common data for all mac context types */
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+ /*
+	 * Pass probe requests and beacons from other APs (needed
+	 * for HT protection).
+ */
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
+ MAC_FILTER_IN_BEACON);
+
/* Fill the data specific for GO mode */
iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
action == FW_CTXT_ACTION_ADD);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 7215f5980186..98556d03c1ed 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1159,8 +1159,12 @@ static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
bcast_mac = &cmd->macs[mvmvif->id];
- /* enable filtering only for associated stations */
- if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+ /*
+ * enable filtering only for associated stations, but not for P2P
+ * Clients
+ */
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
+ !vif->bss_conf.assoc)
return;
bcast_mac->default_discard = 1;
@@ -1237,10 +1241,6 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
return 0;
- /* bcast filtering isn't supported for P2P client */
- if (vif->p2p)
- return 0;
-
if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
return 0;
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 4b6c7d4bd199..eac2b424f6a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -588,9 +588,7 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
struct iwl_scan_offload_cmd *scan,
struct iwl_mvm_scan_params *params)
{
- scan->channel_count =
- mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
- mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+ scan->channel_count = req->n_channels;
scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
@@ -669,61 +667,37 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req,
struct iwl_scan_channel_cfg *channels,
enum ieee80211_band band,
- int *head, int *tail,
+ int *head,
u32 ssid_bitmap,
struct iwl_mvm_scan_params *params)
{
- struct ieee80211_supported_band *s_band;
- int n_channels = req->n_channels;
- int i, j, index = 0;
- bool partial;
+ int i, index = 0;
- /*
- * We have to configure all supported channels, even if we don't want to
- * scan on them, but we have to send channels in the order that we want
- * to scan. So add requested channels to head of the list and others to
- * the end.
- */
- s_band = &mvm->nvm_data->bands[band];
-
- for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
- partial = false;
- for (j = 0; j < n_channels; j++)
- if (s_band->channels[i].center_freq ==
- req->channels[j]->center_freq) {
- index = *head;
- (*head)++;
- /*
- * Channels that came with the request will be
- * in partial scan .
- */
- partial = true;
- break;
- }
- if (!partial) {
- index = *tail;
- (*tail)--;
- }
- channels->channel_number[index] =
- cpu_to_le16(ieee80211_frequency_to_channel(
- s_band->channels[i].center_freq));
+ for (i = 0; i < req->n_channels; i++) {
+ struct ieee80211_channel *chan = req->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ index = *head;
+ (*head)++;
+
+ channels->channel_number[index] = cpu_to_le16(chan->hw_value);
channels->dwell_time[index][0] = params->dwell[band].active;
channels->dwell_time[index][1] = params->dwell[band].passive;
channels->iter_count[index] = cpu_to_le16(1);
channels->iter_interval[index] = 0;
- if (!(s_band->channels[i].flags & IEEE80211_CHAN_NO_IR))
+ if (!(chan->flags & IEEE80211_CHAN_NO_IR))
channels->type[index] |=
cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
channels->type[index] |=
- cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL);
- if (partial)
- channels->type[index] |=
- cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
+ IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
- if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40)
+ if (chan->flags & IEEE80211_CHAN_NO_HT40)
channels->type[index] |=
cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
@@ -740,7 +714,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
int head = 0;
- int tail = band_2ghz + band_5ghz - 1;
u32 ssid_bitmap;
int cmd_len;
int ret;
@@ -772,7 +745,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
&scan_cfg->scan_cmd.tx_cmd[0],
scan_cfg->data);
iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
- IEEE80211_BAND_2GHZ, &head, &tail,
+ IEEE80211_BAND_2GHZ, &head,
ssid_bitmap, &params);
}
if (band_5ghz) {
@@ -782,7 +755,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
scan_cfg->data +
SCAN_OFFLOAD_PROBE_REQ_SIZE);
iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
- IEEE80211_BAND_5GHZ, &head, &tail,
+ IEEE80211_BAND_5GHZ, &head,
ssid_bitmap, &params);
}
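
The rewritten iwl_build_channel_cfg() above drops the head/tail partitioning across all supported channels and simply walks the channels the request named, in request order, skipping those outside the band being filled. A sketch of that filtering loop with stand-in types (not the driver's structures):

    struct chan {
    	int band;
    	int hw_value;
    };

    /* Copy the hw values of req[] entries matching 'band' into out[];
     * returns the next free index, mirroring the driver's *head cursor. */
    static int fill_band(const struct chan *req, int n, int band,
    		     int *out, int head)
    {
    	int i;

    	for (i = 0; i < n; i++) {
    		if (req[i].band != band)
    			continue;
    		out[head++] = req[i].hw_value;	/* request order preserved */
    	}
    	return head;
    }

    int main(void)
    {
    	struct chan req[] = { { 0, 1 }, { 1, 36 }, { 0, 6 } };
    	int out[3], head = 0;

    	head = fill_band(req, 3, 0, out, head);	/* 2.4 GHz entries first */
    	head = fill_band(req, 3, 1, out, head);	/* then 5 GHz */
    	return head == 3 ? 0 : 1;
    }
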
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 7091a18d5a72..98950e45c7b0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -367,6 +367,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
@@ -380,7 +381,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 5b32106182f8..fe0f66f73507 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -185,6 +185,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
+ memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
tx_info_aggr->bss_type = tx_info_src->bss_type;
tx_info_aggr->bss_num = tx_info_src->bss_num;
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index e95dec91a561..b511613bba2d 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -220,6 +220,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
}
tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->pkt_len = pkt_len;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 8dee6c86f4f1..c161141f6c39 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -453,6 +453,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
if (skb) {
rx_info = MWIFIEX_SKB_RXCB(skb);
+ memset(rx_info, 0, sizeof(*rx_info));
rx_info->bss_num = priv->bss_num;
rx_info->bss_type = priv->bss_type;
}
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index cbabc12fbda3..e91cd0fa5ca8 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -645,6 +645,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->pkt_len = skb->len;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 5fce7e78a36e..70eb863c7249 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -150,6 +150,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
return -1;
tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN);
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index e73034fbbde9..0e88364e0c67 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -605,6 +605,7 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
}
tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
@@ -760,6 +761,7 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
skb->priority = MWIFIEX_PRIO_VI;
tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 37f26afd4314..fd7e5b9b4581 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -55,6 +55,7 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
return -1;
}
+ memset(rx_info, 0, sizeof(*rx_info));
rx_info->bss_num = priv->bss_num;
rx_info->bss_type = priv->bss_type;
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 9a56bc61cb1d..b0601b91cc4f 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -175,6 +175,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
}
tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
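
Every mwifiex hunk in this run adds the same one-liner: zero the skb's per-packet control block before filling in selected fields, so flags left behind by a previous user of the buffer cannot leak into a new transmission. The shape of the fix, with a stand-in struct rather than the real mwifiex_txinfo:

    #include <string.h>

    struct tx_ctrl {			/* stand-in control block */
    	unsigned int bss_num;
    	unsigned int bss_type;
    	unsigned int flags;
    };

    static void prep_tx_ctrl(struct tx_ctrl *info,
    			 unsigned int num, unsigned int type)
    {
    	memset(info, 0, sizeof(*info));	/* clear stale state first */
    	info->bss_num = num;
    	info->bss_type = type;
    	/* flags starts at 0; callers OR in only what they need */
    }

    int main(void)
    {
    	struct tx_ctrl c = { .flags = 0xff };	/* stale garbage */

    	prep_tx_ctrl(&c, 1, 2);
    	return c.flags;			/* 0: nothing leaked through */
    }
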
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index e11dab2216c6..832006b5aab1 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -231,9 +231,12 @@ static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer)
*/
static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
{
- __le32 reg;
+ __le32 *reg;
u32 fw_mode;
+ reg = kmalloc(sizeof(*reg), GFP_KERNEL);
+ if (reg == NULL)
+ return -ENOMEM;
/* cannot use rt2x00usb_register_read here as it uses different
* mode (MULTI_READ vs. DEVICE_MODE) and does not pass the
* magic value USB_MODE_AUTORUN (0x11) to the device, thus the
@@ -241,8 +244,9 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
*/
rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
- &reg, sizeof(reg), REGISTER_TIMEOUT_FIRMWARE);
- fw_mode = le32_to_cpu(reg);
+ reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE);
+ fw_mode = le32_to_cpu(*reg);
+ kfree(reg);
if ((fw_mode & 0x00000003) == 2)
return 1;
@@ -261,6 +265,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
int status;
u32 offset;
u32 length;
+ int retval;
/*
* Check which section of the firmware we need.
@@ -278,7 +283,10 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
/*
* Write firmware to device.
*/
- if (rt2800usb_autorun_detect(rt2x00dev)) {
+ retval = rt2800usb_autorun_detect(rt2x00dev);
+ if (retval < 0)
+ return retval;
+ if (retval) {
rt2x00_info(rt2x00dev,
"Firmware loading not required - NIC in AutoRun mode\n");
} else {
@@ -763,7 +771,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
*/
static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev)
{
- if (rt2800usb_autorun_detect(rt2x00dev))
+ int retval;
+
+ retval = rt2800usb_autorun_detect(rt2x00dev);
+ if (retval < 0)
+ return retval;
+ if (retval)
return 1;
return rt2800_efuse_detect(rt2x00dev);
}
@@ -772,7 +785,10 @@ static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
{
int retval;
- if (rt2800usb_efuse_detect(rt2x00dev))
+ retval = rt2800usb_efuse_detect(rt2x00dev);
+ if (retval < 0)
+ return retval;
+ if (retval)
retval = rt2800_read_eeprom_efuse(rt2x00dev);
else
retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
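
Two things change in rt2800usb_autorun_detect() above: the 4-byte register moves from the stack to the heap (USB transfer buffers must be DMA-capable, which stack memory is not guaranteed to be), and the function gains a negative return code that every caller now checks before interpreting the boolean result. A hedged userspace sketch of the buffer rule; usb_read() is a hypothetical stand-in for the vendor request:

    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical transfer helper; a real one would hand 'buf' to the
     * USB stack, which may DMA into it. */
    static void usb_read(uint32_t *buf)
    {
    	*buf = 2;			/* pretend the firmware reports autorun */
    }

    static int autorun_detect(void)
    {
    	uint32_t fw_mode;
    	uint32_t *reg = malloc(sizeof(*reg));	/* heap, never the stack */

    	if (!reg)
    		return -1;		/* callers must check this first */
    	usb_read(reg);
    	fw_mode = *reg;
    	free(reg);
    	return (fw_mode & 0x3) == 2;	/* 1 = autorun, 0 = not */
    }

    int main(void)
    {
    	return autorun_detect() == 1 ? 0 : 1;
    }
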
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1844a47636b6..c65b636bcab9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
{
struct gnttab_map_grant_ref *gop_map = *gopp_map;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+ /* This always points to the shinfo of the skb being checked, which
+ * could be either the first or the one on the frag_list
+ */
struct skb_shared_info *shinfo = skb_shinfo(skb);
+ /* If this is non-NULL, we are currently checking the frag_list skb, and
+ * this points to the shinfo of the first one
+ */
+ struct skb_shared_info *first_shinfo = NULL;
int nr_frags = shinfo->nr_frags;
+ const bool sharedslot = nr_frags &&
+ frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
int i, err;
- struct sk_buff *first_skb = NULL;
/* Check status of header. */
err = (*gopp_copy)->status;
- (*gopp_copy)++;
if (unlikely(err)) {
if (net_ratelimit())
netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
(*gopp_copy)->status,
pending_idx,
(*gopp_copy)->source.u.ref);
- xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
+ /* The first frag might still have this slot mapped */
+ if (!sharedslot)
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_ERROR);
}
+ (*gopp_copy)++;
check_frags:
for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@ check_frags:
pending_idx,
gop_map->handle);
/* Had a previous error? Invalidate this fragment. */
- if (unlikely(err))
+ if (unlikely(err)) {
xenvif_idx_unmap(queue, pending_idx);
+ /* If the mapping of the first frag was OK, but
+ * the header's copy failed, and they are
+ * sharing a slot, send an error
+ */
+ if (i == 0 && sharedslot)
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_ERROR);
+ else
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_OKAY);
+ }
continue;
}
@@ -1075,42 +1097,53 @@ check_frags:
gop_map->status,
pending_idx,
gop_map->ref);
+
xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
continue;
- /* First error: invalidate preceding fragments. */
+
+	/* First error: if the header hasn't shared a slot with the
+ * first frag, release it as well.
+ */
+ if (!sharedslot)
+ xenvif_idx_release(queue,
+ XENVIF_TX_CB(skb)->pending_idx,
+ XEN_NETIF_RSP_OKAY);
+
+ /* Invalidate preceding fragments of this skb. */
for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xenvif_idx_unmap(queue, pending_idx);
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_OKAY);
+ }
+
+ /* And if we found the error while checking the frag_list, unmap
+ * the first skb's frags
+ */
+ if (first_shinfo) {
+ for (j = 0; j < first_shinfo->nr_frags; j++) {
+ pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
+ xenvif_idx_unmap(queue, pending_idx);
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_OKAY);
+ }
}
/* Remember the error: invalidate all subsequent fragments. */
err = newerr;
}
- if (skb_has_frag_list(skb)) {
- first_skb = skb;
- skb = shinfo->frag_list;
- shinfo = skb_shinfo(skb);
+ if (skb_has_frag_list(skb) && !first_shinfo) {
+ first_shinfo = skb_shinfo(skb);
+ shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
nr_frags = shinfo->nr_frags;
goto check_frags;
}
- /* There was a mapping error in the frag_list skb. We have to unmap
- * the first skb's frags
- */
- if (first_skb && err) {
- int j;
- shinfo = skb_shinfo(first_skb);
- for (j = 0; j < shinfo->nr_frags; j++) {
- pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xenvif_idx_unmap(queue, pending_idx);
- }
- }
-
*gopp_map = gop_map;
return err;
}
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
/* Check the remap error code. */
if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
+ /* If there was an error, xenvif_tx_check_gop is
+ * expected to release all the frags which were mapped,
+ * so kfree_skb shouldn't do it again
+ */
skb_shinfo(skb)->nr_frags = 0;
+ if (skb_has_frag_list(skb)) {
+ struct sk_buff *nskb =
+ skb_shinfo(skb)->frag_list;
+ skb_shinfo(nskb)->nr_frags = 0;
+ }
kfree_skb(skb);
continue;
}
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
tx_unmap_op.status);
BUG();
}
-
- xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
}
static inline int rx_work_todo(struct xenvif_queue *queue)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2ccb4a02368b..055222bae6e4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1439,16 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
unsigned int i = 0;
unsigned int num_queues = info->netdev->real_num_tx_queues;
+ netif_carrier_off(info->netdev);
+
for (i = 0; i < num_queues; ++i) {
struct netfront_queue *queue = &info->queues[i];
- /* Stop old i/f to prevent errors whilst we rebuild the state. */
- spin_lock_bh(&queue->rx_lock);
- spin_lock_irq(&queue->tx_lock);
- netif_carrier_off(queue->info->netdev);
- spin_unlock_irq(&queue->tx_lock);
- spin_unlock_bh(&queue->rx_lock);
-
if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
unbind_from_irqhandler(queue->tx_irq, queue);
if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1458,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
queue->tx_evtchn = queue->rx_evtchn = 0;
queue->tx_irq = queue->rx_irq = 0;
+ napi_synchronize(&queue->napi);
+
/* End access and free the pages */
xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -2046,13 +2043,15 @@ static int xennet_connect(struct net_device *dev)
/* By now, the queue structures have been set up */
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];
- spin_lock_bh(&queue->rx_lock);
- spin_lock_irq(&queue->tx_lock);
/* Step 1: Discard all pending TX packet fragments. */
+ spin_lock_irq(&queue->tx_lock);
xennet_release_tx_bufs(queue);
+ spin_unlock_irq(&queue->tx_lock);
/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+ spin_lock_bh(&queue->rx_lock);
+
for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
skb_frag_t *frag;
const struct page *page;
@@ -2076,6 +2075,8 @@ static int xennet_connect(struct net_device *dev)
}
queue->rx.req_prod_pvt = requeue_idx;
+
+ spin_unlock_bh(&queue->rx_lock);
}
/*
@@ -2087,13 +2088,17 @@ static int xennet_connect(struct net_device *dev)
netif_carrier_on(np->netdev);
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];
+
notify_remote_via_irq(queue->tx_irq);
if (queue->tx_irq != queue->rx_irq)
notify_remote_via_irq(queue->rx_irq);
- xennet_tx_buf_gc(queue);
- xennet_alloc_rx_buffers(queue);
+ spin_lock_irq(&queue->tx_lock);
+ xennet_tx_buf_gc(queue);
spin_unlock_irq(&queue->tx_lock);
+
+ spin_lock_bh(&queue->rx_lock);
+ xennet_alloc_rx_buffers(queue);
spin_unlock_bh(&queue->rx_lock);
}
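
The xen-netfront teardown above enforces a stop / wait / free ordering: carrier goes off first so no new work is scheduled, the interrupts are unbound, and napi_synchronize() then waits out any poll still running before the rings are freed. The same ordering in a small pthread sketch (illustrative only, not the Xen code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>
    #include <unistd.h>

    static atomic_int running = 1;
    static int *ring;

    static void *poller(void *arg)
    {
    	(void)arg;
    	while (atomic_load(&running))
    		(void)ring[0];		/* consumer still touching the ring */
    	return NULL;
    }

    int main(void)
    {
    	pthread_t t;

    	ring = calloc(256, sizeof(*ring));
    	if (!ring)
    		return 1;
    	pthread_create(&t, NULL, poller, NULL);
    	usleep(1000);
    	atomic_store(&running, 0);	/* step 1: stop new work (carrier off) */
    	pthread_join(t, NULL);		/* step 2: wait, like napi_synchronize() */
    	free(ring);			/* step 3: only now is freeing safe */
    	return 0;
    }
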
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index b777d8f46bd5..9aa012e6ea0a 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -26,6 +26,54 @@
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
+/*
+ * of_fdt_limit_memory - limit the number of regions in the /memory node
+ * @limit: maximum entries
+ *
+ * Adjust the flattened device tree to have at most 'limit' number of
+ * memory entries in the /memory node. This function may be called
+ * any time after initial_boot_params is set.
+ */
+void of_fdt_limit_memory(int limit)
+{
+ int memory;
+ int len;
+ const void *val;
+ int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
+ int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
+ const uint32_t *addr_prop;
+ const uint32_t *size_prop;
+ int root_offset;
+ int cell_size;
+
+ root_offset = fdt_path_offset(initial_boot_params, "/");
+ if (root_offset < 0)
+ return;
+
+ addr_prop = fdt_getprop(initial_boot_params, root_offset,
+ "#address-cells", NULL);
+ if (addr_prop)
+ nr_address_cells = fdt32_to_cpu(*addr_prop);
+
+ size_prop = fdt_getprop(initial_boot_params, root_offset,
+ "#size-cells", NULL);
+ if (size_prop)
+ nr_size_cells = fdt32_to_cpu(*size_prop);
+
+ cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
+
+ memory = fdt_path_offset(initial_boot_params, "/memory");
+ if (memory > 0) {
+ val = fdt_getprop(initial_boot_params, memory, "reg", &len);
+ if (len > limit*cell_size) {
+ len = limit*cell_size;
+ pr_debug("Limiting number of entries to %d\n", limit);
+ fdt_setprop(initial_boot_params, memory, "reg", val,
+ len);
+ }
+ }
+}
+
/**
* of_fdt_is_compatible - Return true if given node from the given blob has
* compat in its compatible list
@@ -937,7 +985,7 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
}
#endif
-bool __init early_init_dt_scan(void *params)
+bool __init early_init_dt_verify(void *params)
{
if (!params)
return false;
@@ -951,6 +999,12 @@ bool __init early_init_dt_scan(void *params)
return false;
}
+ return true;
+}
+
+
+void __init early_init_dt_scan_nodes(void)
+{
/* Retrieve various information from the /chosen node */
of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
@@ -959,7 +1013,17 @@ bool __init early_init_dt_scan(void *params)
/* Setup memory, calling early_init_dt_add_memory_arch */
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+}
+
+bool __init early_init_dt_scan(void *params)
+{
+ bool status;
+
+ status = early_init_dt_verify(params);
+ if (!status)
+ return false;
+ early_init_dt_scan_nodes();
return true;
}
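
The fdt.c changes split early_init_dt_scan() into a verify step and a node-scan step so an architecture can adjust the blob in between, for instance with the new of_fdt_limit_memory(). The truncation arithmetic in that helper is simple: each /memory entry occupies (#address-cells + #size-cells) 32-bit cells. A sketch of the size computation, with values assumed rather than read from a real tree:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	int addr_cells = 2, size_cells = 2;	/* common 64-bit layout */
    	int limit = 8;				/* keep at most 8 entries */
    	int cell_size = sizeof(uint32_t) * (addr_cells + size_cells);

    	/* a longer reg property is trimmed to this, as in the patch */
    	printf("keep at most %d bytes of /memory reg\n",
    	       limit * cell_size);		/* 8 * 16 = 128 */
    	return 0;
    }
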
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index a3bf2122a8d5..401b2453da45 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -182,40 +182,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
}
EXPORT_SYMBOL(of_mdiobus_register);
-/**
- * of_mdiobus_link_phydev - Find a device node for a phy
- * @mdio: pointer to mii_bus structure
- * @phydev: phydev for which the of_node pointer should be set
- *
- * Walk the list of subnodes of a mdio bus and look for a node that matches the
- * phy's address with its 'reg' property. If found, set the of_node pointer for
- * the phy. This allows auto-probed pyh devices to be supplied with information
- * passed in via DT.
- */
-void of_mdiobus_link_phydev(struct mii_bus *mdio,
- struct phy_device *phydev)
-{
- struct device *dev = &phydev->dev;
- struct device_node *child;
-
- if (dev->of_node || !mdio->dev.of_node)
- return;
-
- for_each_available_child_of_node(mdio->dev.of_node, child) {
- int addr;
-
- addr = of_mdio_parse_addr(&mdio->dev, child);
- if (addr < 0)
- continue;
-
- if (addr == phydev->addr) {
- dev->of_node = child;
- return;
- }
- }
-}
-EXPORT_SYMBOL(of_mdiobus_link_phydev);
-
/* Helper function for of_phy_find_device */
static int of_phy_match(struct device *dev, void *phy_np)
{
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 2872ece81f35..44333bd8f908 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -5,6 +5,12 @@
# Parport configuration.
#
+config ARCH_MIGHT_HAVE_PC_PARPORT
+ bool
+ help
+ Select this config option from the architecture Kconfig if
+ the architecture might have PC parallel port hardware.
+
menuconfig PARPORT
tristate "Parallel port support"
depends on HAS_IOMEM
@@ -31,12 +37,6 @@ menuconfig PARPORT
If unsure, say Y.
-config ARCH_MIGHT_HAVE_PC_PARPORT
- bool
- help
- Select this config option from the architecture Kconfig if
- the architecture might have PC parallel port hardware.
-
if PARPORT
config PARPORT_PC
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 63a54a340863..1c8592b0e146 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3135,8 +3135,13 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
if (probe)
return 0;
- /* Wait for Transaction Pending bit clean */
- if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
+ /*
+ * Wait for Transaction Pending bit to clear. A word-aligned test
+	 * is used, so we use the control offset rather than status and shift
+ * the test bit to match.
+ */
+ if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
+ PCI_AF_STATUS_TP << 8))
goto clear;
dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
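
The pci_af_flr() fix works because pci_wait_for_pending() performs word-aligned reads: PCI_AF_STATUS sits at an odd offset, so the word is read at the control offset instead, which places the status byte in bits 15:8; shifting the Transaction Pending mask left by 8 tests the same bit. The arithmetic, checked in a few lines:

    #include <assert.h>
    #include <stdint.h>

    #define AF_STATUS_TP	0x01		/* bit 0 of the status byte */

    int main(void)
    {
    	uint8_t status = AF_STATUS_TP;	/* transaction pending */
    	/* word read at the control offset: ctrl low byte, status high */
    	uint16_t word = (uint16_t)(status << 8);

    	assert(word & (AF_STATUS_TP << 8));	/* same test, shifted */
    	return 0;
    }
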
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 16a2f067c242..64b98d242ea6 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -112,6 +112,7 @@ config PHY_EXYNOS5250_SATA
config PHY_SUN4I_USB
tristate "Allwinner sunxi SoC USB PHY driver"
depends on ARCH_SUNXI && HAS_IOMEM && OF
+ depends on RESET_CONTROLLER
select GENERIC_PHY
help
Enable this to support the transceiver that is part of Allwinner
@@ -122,6 +123,7 @@ config PHY_SUN4I_USB
config PHY_SAMSUNG_USB2
tristate "Samsung USB 2.0 PHY driver"
+ depends on HAS_IOMEM
select GENERIC_PHY
select MFD_SYSCON
help
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index c64a2f3b2d62..49c446530101 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -614,8 +614,9 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
return phy;
put_dev:
- put_device(&phy->dev);
- ida_remove(&phy_ida, phy->id);
+ put_device(&phy->dev); /* calls phy_release() which frees resources */
+ return ERR_PTR(ret);
+
free_phy:
kfree(phy);
return ERR_PTR(ret);
@@ -799,7 +800,7 @@ static void phy_release(struct device *dev)
phy = to_phy(dev);
dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
- ida_remove(&phy_ida, phy->id);
+ ida_simple_remove(&phy_ida, phy->id);
kfree(phy);
}
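
The phy-core error-path fix encodes a single-owner rule: once the device has been initialized, its release() callback (phy_release() here) is the only place resources die, so an error path may only drop its reference with put_device() — repeating ida_remove() or kfree() there would double-free. A compact sketch of the pattern, with simplified types:

    #include <stdlib.h>

    struct obj {
    	int refs;
    	char *payload;
    };

    static void obj_release(struct obj *o)
    {
    	free(o->payload);		/* the one place cleanup happens */
    	free(o);
    }

    static void obj_put(struct obj *o)
    {
    	if (--o->refs == 0)
    		obj_release(o);
    }

    int main(void)
    {
    	struct obj *o = malloc(sizeof(*o));

    	if (!o)
    		return 1;
    	o->refs = 1;
    	o->payload = malloc(8);
    	/* error path: drop the reference and nothing else */
    	obj_put(o);
    	return 0;
    }
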
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 7007c11fe07d..34b396146c8a 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -233,8 +233,8 @@ static int omap_usb2_probe(struct platform_device *pdev)
if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
- if (!phy->phy_base)
- return -ENOMEM;
+ if (IS_ERR(phy->phy_base))
+ return PTR_ERR(phy->phy_base);
phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
}
@@ -262,7 +262,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
otg->phy = &phy->phy;
platform_set_drvdata(pdev, phy);
- pm_runtime_enable(phy->dev);
generic_phy = devm_phy_create(phy->dev, &ops, NULL);
if (IS_ERR(generic_phy))
@@ -270,10 +269,13 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy_set_drvdata(generic_phy, phy);
+ pm_runtime_enable(phy->dev);
phy_provider = devm_of_phy_provider_register(phy->dev,
of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
+ if (IS_ERR(phy_provider)) {
+ pm_runtime_disable(phy->dev);
return PTR_ERR(phy_provider);
+ }
phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
if (IS_ERR(phy->wkupclk)) {
@@ -317,6 +319,7 @@ static int omap_usb2_remove(struct platform_device *pdev)
if (!IS_ERR(phy->optclk))
clk_unprepare(phy->optclk);
usb_remove_phy(&phy->phy);
+ pm_runtime_disable(phy->dev);
return 0;
}
diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c
index 8a8c6bc8709a..1e69a32c221d 100644
--- a/drivers/phy/phy-samsung-usb2.c
+++ b/drivers/phy/phy-samsung-usb2.c
@@ -107,6 +107,7 @@ static const struct of_device_id samsung_usb2_phy_of_match[] = {
#endif
{ },
};
+MODULE_DEVICE_TABLE(of, samsung_usb2_phy_of_match);
static int samsung_usb2_phy_probe(struct platform_device *pdev)
{
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index edf5d2fd2b22..86db2235ab00 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -320,7 +320,7 @@ int berlin_pinctrl_probe(struct platform_device *pdev,
regmap = dev_get_regmap(&pdev->dev, NULL);
if (!regmap)
- return PTR_ERR(regmap);
+ return -ENODEV;
pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 1bd6363bc95e..9f43916637ca 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
status = readl(info->irqmux_base);
- for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK)
+ for_each_set_bit(n, &status, info->nbanks)
__gpio_irq_handler(&info->banks[n]);
chained_irq_exit(chip, desc);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index f1ca75e6d7b1..5f38c7f67834 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -211,6 +211,10 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
configlen++;
pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL);
+ if (!pinconfig) {
+ kfree(*map);
+ return -ENOMEM;
+ }
if (!of_property_read_u32(node, "allwinner,drive", &val)) {
u16 strength = (val + 1) * 10;
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index b81448b2c75d..a5c6cb773e5f 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -319,8 +319,7 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
struct pnp_dev *pnp = _pnp;
/* true means it matched */
- return !acpi->physical_node_count
- && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
+ return pnp->data == acpi;
}
static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 9b60b1f3261c..44341dc5b148 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
"desc %p not ACKed\n", tx_desc);
}
+ if (ret == NULL) {
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "%s: unable to obtain tx descriptor\n", __func__);
+ goto err_out;
+ }
+
i = bdma_chan->wr_count_next % bdma_chan->bd_num;
if (i == bdma_chan->bd_num - 1) {
i = 0;
@@ -297,7 +303,7 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
tx_desc->txd.phys = bdma_chan->bd_phys +
i * sizeof(struct tsi721_dma_desc);
tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
-
+err_out:
spin_unlock_bh(&bdma_chan->lock);
return ret;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 15b3459f8656..220acb4cbee5 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -633,7 +633,6 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
} else
raw3270_writesf_readpart(rp);
memset(&rp->init_reset, 0, sizeof(rp->init_reset));
- memset(&rp->init_data, 0, sizeof(rp->init_data));
}
static int
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 69ef4f8cfac8..4038437ff033 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -901,10 +901,15 @@ static int ap_device_probe(struct device *dev)
int rc;
ap_dev->drv = ap_drv;
+
+ spin_lock_bh(&ap_device_list_lock);
+ list_add(&ap_dev->list, &ap_device_list);
+ spin_unlock_bh(&ap_device_list_lock);
+
rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
- if (!rc) {
+ if (rc) {
spin_lock_bh(&ap_device_list_lock);
- list_add(&ap_dev->list, &ap_device_list);
+ list_del_init(&ap_dev->list);
spin_unlock_bh(&ap_device_list_lock);
}
return rc;
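
ap_device_probe() above now links the device into the bus list before the driver's probe callback runs and unlinks it only on failure, so a probe that walks the list always finds the device it is probing. The publish-then-roll-back shape, with illustrative helpers:

    #include <stddef.h>

    #define MAX_DEVS 8
    static void *devlist[MAX_DEVS];

    static void publish(void *dev)
    {
    	for (size_t i = 0; i < MAX_DEVS; i++)
    		if (!devlist[i]) {
    			devlist[i] = dev;
    			return;
    		}
    }

    static void unpublish(void *dev)
    {
    	for (size_t i = 0; i < MAX_DEVS; i++)
    		if (devlist[i] == dev)
    			devlist[i] = NULL;
    }

    static int probe(void *dev)
    {
    	(void)dev;
    	return 0;			/* stand-in driver hook */
    }

    static int device_probe(void *dev)
    {
    	int rc;

    	publish(dev);			/* visible before probe runs */
    	rc = probe(dev);
    	if (rc)
    		unpublish(dev);		/* roll back only on failure */
    	return rc;
    }

    int main(void)
    {
    	int d;

    	return device_probe(&d);
    }
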
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f7e316368c99..3f50dfcb3227 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -733,6 +733,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
scsi_next_command(cmd);
return;
}
+ } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
+ /*
+		 * Certain non-BLOCK_PC requests are commands that don't
+		 * actually transfer anything (e.g. FLUSH), so we cannot use
+ * good_bytes != blk_rq_bytes(req) as the signal for an error.
+ * This sets the error explicitly for the problem case.
+ */
+ error = __scsi_error_from_host_byte(cmd, result);
}
/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 6f54ff4f9372..72913b299047 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -182,7 +182,7 @@ static void imx_drm_driver_preclose(struct drm_device *drm,
{
int i;
- if (!file->is_master)
+ if (!drm_is_master(file))
return;
for (i = 0; i < MAX_CRTC; i++)
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 78b0fba7047e..8afc6fee40c5 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_OMAP4
bool "OMAP 4 Camera support"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4
+ depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
select VIDEOBUF2_DMA_CONTIG
---help---
Driver for an OMAP 4 ISS controller.
diff --git a/drivers/staging/rtl8723au/os_dep/usb_intf.c b/drivers/staging/rtl8723au/os_dep/usb_intf.c
index 8b25c1aa2025..ebb19b22f47f 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_intf.c
@@ -530,8 +530,10 @@ int rtw_resume_process23a(struct rtw_adapter *padapter)
pwrpriv->bkeepfwalive = false;
DBG_8723A("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
- if (pm_netdev_open23a(pnetdev, true) != 0)
+ if (pm_netdev_open23a(pnetdev, true) != 0) {
+ up(&pwrpriv->lock);
goto exit;
+ }
netif_device_attach(pnetdev);
netif_carrier_on(pnetdev);
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
index 59679cd46816..69b80e80b011 100644
--- a/drivers/staging/vt6655/bssdb.c
+++ b/drivers/staging/vt6655/bssdb.c
@@ -981,7 +981,7 @@ start:
pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
}
- {
+ if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
pDevice->byReAssocCount++;
/* 10 sec timeout */
if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) {
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 1d3908d044d0..5a5fd937a442 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -2318,6 +2318,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
int handled = 0;
unsigned char byData = 0;
int ii = 0;
+ unsigned long flags;
MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
@@ -2331,7 +2332,8 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
handled = 1;
MACvIntDisable(pDevice->PortOffset);
- spin_lock_irq(&pDevice->lock);
+
+ spin_lock_irqsave(&pDevice->lock, flags);
//Make sure current page is 0
VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel);
@@ -2560,7 +2562,8 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
if (byOrgPageSel == 1)
MACvSelectPage1(pDevice->PortOffset);
- spin_unlock_irq(&pDevice->lock);
+ spin_unlock_irqrestore(&pDevice->lock, flags);
+
MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
return IRQ_RETVAL(handled);
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index a99c63152b8d..2c516f2eebed 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -306,7 +306,7 @@ static int imx_get_sensor_data(struct platform_device *pdev)
{
struct imx_thermal_data *data = platform_get_drvdata(pdev);
struct regmap *map;
- int t1, t2, n1, n2;
+ int t1, n1;
int ret;
u32 val;
u64 temp64;
@@ -333,14 +333,10 @@ static int imx_get_sensor_data(struct platform_device *pdev)
/*
* Sensor data layout:
* [31:20] - sensor value @ 25C
- * [19:8] - sensor value of hot
- * [7:0] - hot temperature value
* Use universal formula now and only need sensor value @ 25C
* slope = 0.4297157 - (0.0015976 * 25C fuse)
*/
n1 = val >> 20;
- n2 = (val & 0xfff00) >> 8;
- t2 = val & 0xff;
t1 = 25; /* t1 always 25C */
/*
@@ -366,16 +362,16 @@ static int imx_get_sensor_data(struct platform_device *pdev)
data->c2 = n1 * data->c1 + 1000 * t1;
/*
- * Set the default passive cooling trip point to 20 °C below the
- * maximum die temperature. Can be changed from userspace.
+	 * Set the default passive cooling trip point; it can be
+	 * changed from userspace.
*/
- data->temp_passive = 1000 * (t2 - 20);
+ data->temp_passive = IMX_TEMP_PASSIVE;
/*
- * The maximum die temperature is t2, let's give 5 °C cushion
- * for noise and possible temperature rise between measurements.
+	 * The maximum die temperature is set to 20 °C higher than
+	 * IMX_TEMP_PASSIVE.
*/
- data->temp_critical = 1000 * (t2 - 5);
+ data->temp_critical = 1000 * 20 + data->temp_passive;
return 0;
}
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 04b1be7fa018..4b2b999b7611 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -156,8 +156,8 @@ static int of_thermal_bind(struct thermal_zone_device *thermal,
ret = thermal_zone_bind_cooling_device(thermal,
tbp->trip_id, cdev,
- tbp->min,
- tbp->max);
+ tbp->max,
+ tbp->min);
if (ret)
return ret;
}
@@ -712,11 +712,12 @@ thermal_of_build_thermal_zone(struct device_node *np)
}
i = 0;
- for_each_child_of_node(child, gchild)
+ for_each_child_of_node(child, gchild) {
ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++],
tz->trips, tz->ntrips);
if (ret)
goto free_tbps;
+ }
finish:
of_node_put(child);
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index fdb07199d9c2..1967bee4f076 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -140,6 +140,12 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
return NULL;
}
+static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
+{
+ unsigned long temp;
+ return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
+}
+
int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
{
struct thermal_hwmon_device *hwmon;
@@ -189,21 +195,18 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
if (result)
goto free_temp_mem;
- if (tz->ops->get_crit_temp) {
- unsigned long temperature;
- if (!tz->ops->get_crit_temp(tz, &temperature)) {
- snprintf(temp->temp_crit.name,
- sizeof(temp->temp_crit.name),
+ if (thermal_zone_crit_temp_valid(tz)) {
+ snprintf(temp->temp_crit.name,
+ sizeof(temp->temp_crit.name),
"temp%d_crit", hwmon->count);
- temp->temp_crit.attr.attr.name = temp->temp_crit.name;
- temp->temp_crit.attr.attr.mode = 0444;
- temp->temp_crit.attr.show = temp_crit_show;
- sysfs_attr_init(&temp->temp_crit.attr.attr);
- result = device_create_file(hwmon->device,
- &temp->temp_crit.attr);
- if (result)
- goto unregister_input;
- }
+ temp->temp_crit.attr.attr.name = temp->temp_crit.name;
+ temp->temp_crit.attr.attr.mode = 0444;
+ temp->temp_crit.attr.show = temp_crit_show;
+ sysfs_attr_init(&temp->temp_crit.attr.attr);
+ result = device_create_file(hwmon->device,
+ &temp->temp_crit.attr);
+ if (result)
+ goto unregister_input;
}
mutex_lock(&thermal_hwmon_list_lock);
@@ -250,7 +253,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
}
device_remove_file(hwmon->device, &temp->temp_input.attr);
- if (tz->ops->get_crit_temp)
+ if (thermal_zone_crit_temp_valid(tz))
device_remove_file(hwmon->device, &temp->temp_crit.attr);
mutex_lock(&thermal_hwmon_list_lock);
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index a1271b55103a..634b6ce0e63a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1155,7 +1155,7 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
/* register shadow for context save and restore */
bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) *
bgp->conf->sensor_count, GFP_KERNEL);
- if (!bgp) {
+ if (!bgp->regval) {
dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index c9f5c9dcc15c..008c223eaf26 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -177,7 +177,7 @@ static void arc_serial_tx_chars(struct arc_uart_port *uart)
uart->port.icount.tx++;
uart->port.x_char = 0;
sent = 1;
- } else if (xmit->tail != xmit->head) { /* TODO: uart_circ_empty */
+ } else if (!uart_circ_empty(xmit)) {
ch = xmit->buf[xmit->tail];
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
uart->port.icount.tx++;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index e2f93874989b..044e86d528ae 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -567,6 +567,9 @@ static void imx_start_tx(struct uart_port *port)
struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
+ if (uart_circ_empty(&port->state->xmit))
+ return;
+
if (USE_IRDA(sport)) {
/* half duplex in IrDA mode; have to disable receive mode */
temp = readl(sport->port.membase + UCR4);
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 1efd4c36ba0c..99b7b8697861 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -603,6 +603,8 @@ static void ip22zilog_start_tx(struct uart_port *port)
} else {
struct circ_buf *xmit = &port->state->xmit;
+ if (uart_circ_empty(xmit))
+ return;
writeb(xmit->buf[xmit->tail], &channel->data);
ZSDELAY();
ZS_WSYNC(channel);
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 68f2c53e0b54..5702828fb62e 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -266,9 +266,11 @@ static void m32r_sio_start_tx(struct uart_port *port)
if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
- serial_out(up, UART_TX, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- up->port.icount.tx++;
+ if (!uart_circ_empty(xmit)) {
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ }
}
while((serial_in(up, UART_LSR) & UART_EMPTY) != UART_EMPTY);
#else
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 8193635103ee..f7ad5b903055 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -653,6 +653,8 @@ static void pmz_start_tx(struct uart_port *port)
} else {
struct circ_buf *xmit = &port->state->xmit;
+ if (uart_circ_empty(xmit))
+ goto out;
write_zsdata(uap, xmit->buf[xmit->tail]);
zssync(uap);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -661,6 +663,7 @@ static void pmz_start_tx(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
}
+ out:
pmz_debug("pmz: start_tx() done.\n");
}
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 80a58eca785b..2f57df9a71d9 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -427,6 +427,9 @@ static void sunsab_start_tx(struct uart_port *port)
struct circ_buf *xmit = &up->port.state->xmit;
int i;
+ if (uart_circ_empty(xmit))
+ return;
+
up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR);
writeb(up->interrupt_mask1, &up->regs->w.imr1);
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index a85db8b87156..02df3940b95e 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -703,6 +703,8 @@ static void sunzilog_start_tx(struct uart_port *port)
} else {
struct circ_buf *xmit = &port->state->xmit;
+ if (uart_circ_empty(xmit))
+ return;
writeb(xmit->buf[xmit->tail], &channel->data);
ZSDELAY();
ZS_WSYNC(channel);
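
A run of serial drivers receives the same start_tx fix above: never emit buf[tail] when the circular buffer is empty, or a stale byte goes out on the wire. The guard, sketched with a simplified ring rather than the uart_circ API:

    #include <stdio.h>

    #define RING_SIZE	4096		/* power of two, like UART_XMIT_SIZE */

    struct ring {
    	char buf[RING_SIZE];
    	unsigned int head, tail;
    };

    static int tx_one(struct ring *xmit)
    {
    	if (xmit->head == xmit->tail)	/* the emptiness check being added */
    		return 0;		/* nothing queued: leave the hw alone */
    	putchar(xmit->buf[xmit->tail]);
    	xmit->tail = (xmit->tail + 1) & (RING_SIZE - 1);
    	return 1;
    }

    int main(void)
    {
    	struct ring r = { .head = 0, .tail = 0 };

    	return tx_one(&r);		/* 0: no stale byte transmitted */
    }
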
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 9d2b673f90e3..b8125aa64ad8 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1169,8 +1169,8 @@ static int ep_enable(struct usb_ep *ep,
if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
cap |= QH_IOS;
- if (hwep->num)
- cap |= QH_ZLT;
+
+ cap |= QH_ZLT;
cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
/*
* For ISO-TX, we set mult at QH as the largest value, and use
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 21b99b4b4082..0e950ad8cb25 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -889,6 +889,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
if (!hub_is_superspeed(hub->hdev))
return -EINVAL;
+ ret = hub_port_status(hub, port1, &portstatus, &portchange);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * The AMD FCH USB XHCI Controller [1022:7814] reports spurious
+	 * results if the USB 3.0 port link state is set to Disabled: a
+	 * subsequently hotplugged USB 3.0 device is routed to the 2.0
+	 * root hub and recognized as a high-speed device. Since the
+	 * port is already in the USB_SS_PORT_LS_RX_DETECT state, check
+	 * for that state here to avoid the bug.
+ */
+ if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_RX_DETECT) {
+ dev_dbg(&hub->ports[port1 - 1]->dev,
+ "Not disabling port; link state is RxDetect\n");
+ return ret;
+ }
+
ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
if (ret)
return ret;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 762e4a5f5ae9..330df5ce435b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
{ USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 115662c16dcc..8a3813be1b28 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -720,7 +720,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
- { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) },
+ { USB_DEVICE(TESTO_VID, TESTO_1_PID) },
+ { USB_DEVICE(TESTO_VID, TESTO_3_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) },
@@ -944,6 +945,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
+ /* Infineon Devices */
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 500474c48f4b..c4777bc6aee0 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -584,6 +584,12 @@
#define RATOC_PRODUCT_ID_USB60F 0xb020
/*
+ * Infineon Technologies
+ */
+#define INFINEON_VID 0x058b
+#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+
+/*
* Acton Research Corp.
*/
#define ACTON_VID 0x0647 /* Vendor ID */
@@ -798,7 +804,8 @@
* Submitted by Colin Leroy
*/
#define TESTO_VID 0x128D
-#define TESTO_USB_INTERFACE_PID 0x0001
+#define TESTO_1_PID 0x0001
+#define TESTO_3_PID 0x0003
/*
* Mobility Electronics products.
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ac73f49cd9f0..a9688940543d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1487,6 +1487,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index b7a506f2bb14..5c660c77f03b 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -426,20 +426,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
* p2m are consistent.
*/
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- unsigned long p;
- struct page *scratch_page = get_balloon_scratch_page();
-
if (!PageHighMem(page)) {
+ struct page *scratch_page = get_balloon_scratch_page();
+
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
pfn_pte(page_to_pfn(scratch_page),
PAGE_KERNEL_RO), 0);
BUG_ON(ret);
- }
- p = page_to_pfn(scratch_page);
- __set_phys_to_machine(pfn, pfn_to_mfn(p));
- put_balloon_scratch_page();
+ put_balloon_scratch_page();
+ }
+ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
#endif
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 5d4de88fe5b8..eeba7544f0cd 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1195,18 +1195,20 @@ static int gnttab_expand(unsigned int req_entries)
int gnttab_init(void)
{
int i;
+ unsigned long max_nr_grant_frames;
unsigned int max_nr_glist_frames, nr_glist_frames;
unsigned int nr_init_grefs;
int ret;
gnttab_request_version();
+ max_nr_grant_frames = gnttab_max_grant_frames();
nr_grant_frames = 1;
/* Determine the maximum number of frames required for the
* grant reference free list on the current hypervisor.
*/
BUG_ON(grefs_per_grant_frame == 0);
- max_nr_glist_frames = (gnttab_max_grant_frames() *
+ max_nr_glist_frames = (max_nr_grant_frames *
grefs_per_grant_frame / RPP);
gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
@@ -1223,6 +1225,11 @@ int gnttab_init(void)
}
}
+ ret = arch_gnttab_init(max_nr_grant_frames,
+ nr_status_frames(max_nr_grant_frames));
+ if (ret < 0)
+ goto ini_nomem;
+
if (gnttab_setup() < 0) {
ret = -ENODEV;
goto ini_nomem;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c3667b202f2f..5f1e1f3cd186 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -88,7 +88,6 @@ static int xen_suspend(void *data)
if (!si->cancelled) {
xen_irq_resume();
- xen_console_resume();
xen_timer_resume();
}
@@ -135,6 +134,10 @@ static void do_suspend(void)
err = stop_machine(xen_suspend, &si, cpumask_of(0));
+ /* Resume console as early as possible. */
+ if (!si.cancelled)
+ xen_console_resume();
+
raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
diff --git a/firmware/Makefile b/firmware/Makefile
index 5747417069ca..0862d34cf7d1 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -219,6 +219,12 @@ $(obj)/%.fw: $(obj)/%.H16 $(ihex2fw_dep)
obj-y += $(patsubst %,%.gen.o, $(fw-external-y))
obj-$(CONFIG_FIRMWARE_IN_KERNEL) += $(patsubst %,%.gen.o, $(fw-shipped-y))
+ifeq ($(KBUILD_SRC),)
+# Makefile.build only creates subdirectories for O= builds, but external
+# firmware might live outside the kernel source tree
+_dummy := $(foreach d,$(addprefix $(obj)/,$(dir $(fw-external-y))), $(shell [ -d $(d) ] || mkdir -p $(d)))
+endif
+
# Remove .S files and binaries created from ihex
# (during 'make clean' .config isn't included so they're all in $(fw-shipped-))
targets := $(fw-shipped-) $(patsubst $(obj)/%,%, \
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 42dd2e499ed8..35de0c04729f 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -55,13 +55,13 @@ static int __init afs_get_client_UUID(void)
afs_uuid.time_low = uuidtime;
afs_uuid.time_mid = uuidtime >> 32;
afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK;
- afs_uuid.time_hi_and_version = AFS_UUID_VERSION_TIME;
+ afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME;
get_random_bytes(&clockseq, 2);
afs_uuid.clock_seq_low = clockseq;
afs_uuid.clock_seq_hi_and_reserved =
(clockseq >> 8) & AFS_UUID_CLOCKHI_MASK;
- afs_uuid.clock_seq_hi_and_reserved = AFS_UUID_VARIANT_STD;
+ afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD;
_debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
afs_uuid.time_low,
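
Both afs hunks above fix the same bug pattern: a plain assignment ("=") immediately
overwrites field bits that the previous statement just stored, where a bitwise
OR-assign ("|=") was intended; the same pattern appears again in the fs/coredump.c
hunk further down (tsk->flags |= PF_DUMPCORE). A minimal standalone sketch of the
difference (values are illustrative, not from the patch):

	uint16_t field = 0x0abc;	/* timestamp bits already stored */

	field  = 0x1000;		/* buggy: the 0x0abc bits are lost */
	field |= 0x1000;		/* fixed: version merged in -> 0x1abc */
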
diff --git a/fs/aio.c b/fs/aio.c
index 955947ef3e02..1c9c5f0a9e2b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
struct kioctx_cpu *kcpu;
+ unsigned long flags;
preempt_disable();
kcpu = this_cpu_ptr(ctx->cpu);
+ local_irq_save(flags);
kcpu->reqs_available += nr;
+
while (kcpu->reqs_available >= ctx->req_batch * 2) {
kcpu->reqs_available -= ctx->req_batch;
atomic_add(ctx->req_batch, &ctx->reqs_available);
}
+ local_irq_restore(flags);
preempt_enable();
}
@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
{
struct kioctx_cpu *kcpu;
bool ret = false;
+ unsigned long flags;
preempt_disable();
kcpu = this_cpu_ptr(ctx->cpu);
+ local_irq_save(flags);
if (!kcpu->reqs_available) {
int old, avail = atomic_read(&ctx->reqs_available);
@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
ret = true;
kcpu->reqs_available--;
out:
+ local_irq_restore(flags);
preempt_enable();
return ret;
}
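
The two aio hunks bracket the per-cpu reqs_available update with
local_irq_save()/local_irq_restore(): preempt_disable() pins the task to one CPU,
but it does not stop an interrupt handler on that same CPU from running the same
update concurrently. The resulting pattern, sketched generically (the counter
name is invented):

	unsigned long flags;

	preempt_disable();		/* stay on this CPU */
	local_irq_save(flags);		/* keep local irqs out of the RMW */
	counter->value += nr;		/* read-modify-write, now safe against
					 * this CPU's interrupt handlers */
	local_irq_restore(flags);
	preempt_enable();
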
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index e12441c7cf1d..7187b14faa6c 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -484,8 +484,19 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
log_list);
list_del_init(&ordered->log_list);
spin_unlock_irq(&log->log_extents_lock[index]);
+
+ if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
+ !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
+ struct inode *inode = ordered->inode;
+ u64 start = ordered->file_offset;
+ u64 end = ordered->file_offset + ordered->len - 1;
+
+ WARN_ON(!inode);
+ filemap_fdatawrite_range(inode->i_mapping, start, end);
+ }
wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
&ordered->flags));
+
btrfs_put_ordered_extent(ordered);
spin_lock_irq(&log->log_extents_lock[index]);
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6104676857f5..6cb82f62cb7c 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1680,11 +1680,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
if (device->bdev == root->fs_info->fs_devices->latest_bdev)
root->fs_info->fs_devices->latest_bdev = next_device->bdev;
- if (device->bdev)
+ if (device->bdev) {
device->fs_devices->open_devices--;
-
- /* remove sysfs entry */
- btrfs_kobj_rm_device(root->fs_info, device);
+ /* remove sysfs entry */
+ btrfs_kobj_rm_device(root->fs_info, device);
+ }
call_rcu(&device->rcu, free_device);
diff --git a/fs/coredump.c b/fs/coredump.c
index 0b2528fb640e..a93f7e6ea4cf 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
if (unlikely(nr < 0))
return nr;
- tsk->flags = PF_DUMPCORE;
+ tsk->flags |= PF_DUMPCORE;
if (atomic_read(&mm->mm_users) == nr + 1)
goto done;
/*
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 98040ba388ac..17e39b047de5 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -71,7 +71,6 @@ struct dio_submit {
been performed at the start of a
write */
int pages_in_io; /* approximate total IO pages */
- size_t size; /* total request size (doesn't change)*/
sector_t block_in_file; /* Current offset into the underlying
file in dio_block units. */
unsigned blocks_available; /* At block_in_file. changes */
@@ -198,9 +197,8 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
* L1 cache.
*/
static inline struct page *dio_get_page(struct dio *dio,
- struct dio_submit *sdio, size_t *from, size_t *to)
+ struct dio_submit *sdio)
{
- int n;
if (dio_pages_present(sdio) == 0) {
int ret;
@@ -209,10 +207,7 @@ static inline struct page *dio_get_page(struct dio *dio,
return ERR_PTR(ret);
BUG_ON(dio_pages_present(sdio) == 0);
}
- n = sdio->head++;
- *from = n ? 0 : sdio->from;
- *to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
- return dio->pages[n];
+ return dio->pages[sdio->head];
}
/**
@@ -911,11 +906,15 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
while (sdio->block_in_file < sdio->final_block_in_request) {
struct page *page;
size_t from, to;
- page = dio_get_page(dio, sdio, &from, &to);
+
+ page = dio_get_page(dio, sdio);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
}
+ from = sdio->head ? 0 : sdio->from;
+ to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
+ sdio->head++;
while (from < to) {
unsigned this_chunk_bytes; /* # of bytes mapped */
@@ -1104,7 +1103,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
unsigned blkbits = i_blkbits;
unsigned blocksize_mask = (1 << blkbits) - 1;
ssize_t retval = -EINVAL;
- loff_t end = offset + iov_iter_count(iter);
+ size_t count = iov_iter_count(iter);
+ loff_t end = offset + count;
struct dio *dio;
struct dio_submit sdio = { 0, };
struct buffer_head map_bh = { 0, };
@@ -1287,10 +1287,9 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
*/
BUG_ON(retval == -EIOCBQUEUED);
if (dio->is_async && retval == 0 && dio->result &&
- ((rw == READ) || (dio->result == sdio.size)))
+ (rw == READ || dio->result == count))
retval = -EIOCBQUEUED;
-
- if (retval != -EIOCBQUEUED)
+ else
dio_await_completion(dio);
if (drop_refcount(dio) == 0) {
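
With dio_get_page() reduced to returning the page, the head/tail arithmetic sits
at its only call site: the first page of a request starts at sdio->from, the last
ends at sdio->to, and every page in between spans the full 0..PAGE_SIZE. Worked
through for a hypothetical request at file offset 1000 covering 10000 bytes with
4096-byte pages (end = 11000):

	page 0 (head):   from = 1000, to = 4096  -> 3096 bytes
	page 1 (middle): from = 0,    to = 4096  -> 4096 bytes
	page 2 (tail):   from = 0,    to = 2808  -> 2808 bytes (11000 - 2*4096)
	                                            total      10000 bytes
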
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 3f5c188953a4..0b7e28e7eaa4 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -966,10 +966,10 @@ retry:
continue;
}
- if (ei->i_es_lru_nr == 0 || ei == locked_ei)
+ if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
+ !write_trylock(&ei->i_es_lock))
continue;
- write_lock(&ei->i_es_lock);
shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
if (ei->i_es_lru_nr == 0)
list_del_init(&ei->i_es_lru);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index a87455df38bc..5b87fc36aab8 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -338,7 +338,7 @@ out:
fatal = err;
} else {
ext4_error(sb, "bit already cleared for inode %lu", ino);
- if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+ if (gdp && !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
int count;
count = ext4_free_inodes_count(sb, gdp);
percpu_counter_sub(&sbi->s_freeinodes_counter,
@@ -874,6 +874,13 @@ got:
goto out;
}
+ BUFFER_TRACE(group_desc_bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, group_desc_bh);
+ if (err) {
+ ext4_std_error(sb, err);
+ goto out;
+ }
+
/* We may have to initialize the block bitmap if it isn't already */
if (ext4_has_group_desc_csum(sb) &&
gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
@@ -910,13 +917,6 @@ got:
}
}
- BUFFER_TRACE(group_desc_bh, "get_write_access");
- err = ext4_journal_get_write_access(handle, group_desc_bh);
- if (err) {
- ext4_std_error(sb, err);
- goto out;
- }
-
/* Update the relevant bg descriptor fields */
if (ext4_has_group_desc_csum(sb)) {
int free;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 7f72f50a8fa7..2dcb936be90e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -752,8 +752,8 @@ void ext4_mb_generate_buddy(struct super_block *sb,
if (free != grp->bb_free) {
ext4_grp_locked_error(sb, group, 0, 0,
- "%u clusters in bitmap, %u in gd; "
- "block bitmap corrupt.",
+ "block bitmap and bg descriptor "
+ "inconsistent: %u vs %u free clusters",
free, grp->bb_free);
/*
* If we intend to continue, we consider group descriptor
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b9b9aabfb4d2..6df7bc611dbd 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1525,8 +1525,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
sbi->s_commit_interval = HZ * arg;
} else if (token == Opt_max_batch_time) {
- if (arg == 0)
- arg = EXT4_DEF_MAX_BATCH_TIME;
sbi->s_max_batch_time = arg;
} else if (token == Opt_min_batch_time) {
sbi->s_min_batch_time = arg;
@@ -2809,10 +2807,11 @@ static void print_daily_error_info(unsigned long arg)
es = sbi->s_es;
if (es->s_error_count)
- ext4_msg(sb, KERN_NOTICE, "error count: %u",
+ /* fsck newer than v1.41.13 is needed to clean this condition. */
+ ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
le32_to_cpu(es->s_error_count));
if (es->s_first_error_time) {
- printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d",
+ printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
sb->s_id, le32_to_cpu(es->s_first_error_time),
(int) sizeof(es->s_first_error_func),
es->s_first_error_func,
@@ -2826,7 +2825,7 @@ static void print_daily_error_info(unsigned long arg)
printk("\n");
}
if (es->s_last_error_time) {
- printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d",
+ printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
sb->s_id, le32_to_cpu(es->s_last_error_time),
(int) sizeof(es->s_last_error_func),
es->s_last_error_func,
@@ -3880,38 +3879,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount2;
}
}
-
- /*
- * set up enough so that it can read an inode,
- * and create new inode for buddy allocator
- */
- sbi->s_gdb_count = db_count;
- if (!test_opt(sb, NOLOAD) &&
- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
- sb->s_op = &ext4_sops;
- else
- sb->s_op = &ext4_nojournal_sops;
-
- ext4_ext_init(sb);
- err = ext4_mb_init(sb);
- if (err) {
- ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
- err);
- goto failed_mount2;
- }
-
if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
- goto failed_mount2a;
+ goto failed_mount2;
}
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
if (!ext4_fill_flex_info(sb)) {
ext4_msg(sb, KERN_ERR,
"unable to initialize "
"flex_bg meta info!");
- goto failed_mount2a;
+ goto failed_mount2;
}
+ sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
@@ -3946,6 +3926,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_stripe = ext4_get_stripe_size(sbi);
sbi->s_extent_max_zeroout_kb = 32;
+ /*
+ * set up enough so that it can read an inode
+ */
+ if (!test_opt(sb, NOLOAD) &&
+ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
+ sb->s_op = &ext4_sops;
+ else
+ sb->s_op = &ext4_nojournal_sops;
sb->s_export_op = &ext4_export_ops;
sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_QUOTA
@@ -4135,13 +4123,21 @@ no_journal:
if (err) {
ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
"reserved pool", ext4_calculate_resv_clusters(sb));
- goto failed_mount5;
+ goto failed_mount4a;
}
err = ext4_setup_system_zone(sb);
if (err) {
ext4_msg(sb, KERN_ERR, "failed to initialize system "
"zone (%d)", err);
+ goto failed_mount4a;
+ }
+
+ ext4_ext_init(sb);
+ err = ext4_mb_init(sb);
+ if (err) {
+ ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
+ err);
goto failed_mount5;
}
@@ -4218,8 +4214,11 @@ failed_mount8:
failed_mount7:
ext4_unregister_li_request(sb);
failed_mount6:
- ext4_release_system_zone(sb);
+ ext4_mb_release(sb);
failed_mount5:
+ ext4_ext_release(sb);
+ ext4_release_system_zone(sb);
+failed_mount4a:
dput(sb->s_root);
sb->s_root = NULL;
failed_mount4:
@@ -4243,14 +4242,11 @@ failed_mount3:
percpu_counter_destroy(&sbi->s_extent_cache_cnt);
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
-failed_mount2a:
- ext4_mb_release(sb);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
ext4_kvfree(sbi->s_group_desc);
failed_mount:
- ext4_ext_release(sb);
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
if (sbi->s_proc) {
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 0924521306b4..f8cf619edb5f 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -608,8 +608,8 @@ static int __allocate_data_block(struct dnode_of_data *dn)
* b. do not use extent cache for better performance
* c. give the block addresses to blockdev
*/
-static int get_data_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
+static int __get_data_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create, bool fiemap)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
unsigned int blkbits = inode->i_sb->s_blocksize_bits;
@@ -637,7 +637,7 @@ static int get_data_block(struct inode *inode, sector_t iblock,
err = 0;
goto unlock_out;
}
- if (dn.data_blkaddr == NEW_ADDR)
+ if (dn.data_blkaddr == NEW_ADDR && !fiemap)
goto put_out;
if (dn.data_blkaddr != NULL_ADDR) {
@@ -671,7 +671,7 @@ get_next:
err = 0;
goto unlock_out;
}
- if (dn.data_blkaddr == NEW_ADDR)
+ if (dn.data_blkaddr == NEW_ADDR && !fiemap)
goto put_out;
end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
@@ -708,10 +708,23 @@ out:
return err;
}
+static int get_data_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ return __get_data_block(inode, iblock, bh_result, create, false);
+}
+
+static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ return __get_data_block(inode, iblock, bh_result, create, true);
+}
+
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
- return generic_block_fiemap(inode, fieinfo, start, len, get_data_block);
+ return generic_block_fiemap(inode, fieinfo,
+ start, len, get_data_block_fiemap);
}
static int f2fs_read_data_page(struct file *file, struct page *page)
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 966acb039e3b..a4addd72ebbd 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -376,11 +376,11 @@ static struct page *init_inode_metadata(struct inode *inode,
put_error:
f2fs_put_page(page, 1);
+error:
/* once the failed inode becomes a bad inode, i_mode is S_IFREG */
truncate_inode_pages(&inode->i_data, 0);
truncate_blocks(inode, 0);
remove_dirty_dir_inode(inode);
-error:
remove_inode_page(inode);
return ERR_PTR(err);
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index e51c732b0dd9..58df97e174d0 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -342,9 +342,6 @@ struct f2fs_sm_info {
struct dirty_seglist_info *dirty_info; /* dirty segment information */
struct curseg_info *curseg_array; /* active segment information */
- struct list_head wblist_head; /* list of under-writeback pages */
- spinlock_t wblist_lock; /* lock for checkpoint */
-
block_t seg0_blkaddr; /* block address of 0'th segment */
block_t main_blkaddr; /* start block address of main area */
block_t ssa_blkaddr; /* start block address of SSA area */
@@ -644,7 +641,8 @@ static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
*/
static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
- WARN_ON((nid >= NM_I(sbi)->max_nid));
+ if (unlikely(nid < F2FS_ROOT_INO(sbi)))
+ return -EINVAL;
if (unlikely(nid >= NM_I(sbi)->max_nid))
return -EINVAL;
return 0;
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index c58e33075719..7d8b96275092 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -659,16 +659,19 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
off_start = offset & (PAGE_CACHE_SIZE - 1);
off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+ f2fs_lock_op(sbi);
+
for (index = pg_start; index <= pg_end; index++) {
struct dnode_of_data dn;
- f2fs_lock_op(sbi);
+ if (index == pg_end && !off_end)
+ goto noalloc;
+
set_new_dnode(&dn, inode, NULL, NULL, 0);
ret = f2fs_reserve_block(&dn, index);
- f2fs_unlock_op(sbi);
if (ret)
break;
-
+noalloc:
if (pg_start == pg_end)
new_size = offset + len;
else if (index == pg_start && off_start)
@@ -683,8 +686,9 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
i_size_read(inode) < new_size) {
i_size_write(inode, new_size);
mark_inode_dirty(inode);
- f2fs_write_inode(inode, NULL);
+ update_inode_page(inode);
}
+ f2fs_unlock_op(sbi);
return ret;
}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index adc622c6bdce..2cf6962f6cc8 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -78,6 +78,7 @@ static int do_read_inode(struct inode *inode)
if (check_nid_range(sbi, inode->i_ino)) {
f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
(unsigned long) inode->i_ino);
+ WARN_ON(1);
return -EINVAL;
}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 9138c32aa698..a6bdddc33ce2 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -417,9 +417,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
- down_write(&F2FS_I(old_inode)->i_sem);
- F2FS_I(old_inode)->i_pino = new_dir->i_ino;
- up_write(&F2FS_I(old_inode)->i_sem);
new_inode->i_ctime = CURRENT_TIME;
down_write(&F2FS_I(new_inode)->i_sem);
@@ -448,6 +445,10 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
+ down_write(&F2FS_I(old_inode)->i_sem);
+ file_lost_pino(old_inode);
+ up_write(&F2FS_I(old_inode)->i_sem);
+
old_inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(old_inode);
@@ -457,9 +458,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (old_dir != new_dir) {
f2fs_set_link(old_inode, old_dir_entry,
old_dir_page, new_dir);
- down_write(&F2FS_I(old_inode)->i_sem);
- F2FS_I(old_inode)->i_pino = new_dir->i_ino;
- up_write(&F2FS_I(old_inode)->i_sem);
update_inode_page(old_inode);
} else {
kunmap(old_dir_page);
@@ -474,7 +472,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
return 0;
put_out_dir:
- f2fs_put_page(new_page, 1);
+ kunmap(new_page);
+ f2fs_put_page(new_page, 0);
out_dir:
if (old_dir_entry) {
kunmap(old_dir_page);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 9dfb9a042fd2..4b697ccc9b0c 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -42,6 +42,8 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
} else if (type == DIRTY_DENTS) {
+ if (sbi->sb->s_bdi->dirty_exceeded)
+ return false;
mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index f25f0e07e26f..d04613df710a 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -272,14 +272,15 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
return -ENOMEM;
spin_lock_init(&fcc->issue_lock);
init_waitqueue_head(&fcc->flush_wait_queue);
+ sbi->sm_info->cmd_control_info = fcc;
fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(fcc->f2fs_issue_flush)) {
err = PTR_ERR(fcc->f2fs_issue_flush);
kfree(fcc);
+ sbi->sm_info->cmd_control_info = NULL;
return err;
}
- sbi->sm_info->cmd_control_info = fcc;
return err;
}
@@ -1885,8 +1886,6 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
/* init sm info */
sbi->sm_info = sm_info;
- INIT_LIST_HEAD(&sm_info->wblist_head);
- spin_lock_init(&sm_info->wblist_lock);
sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index b2b18637cb9e..8f96d9372ade 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -689,9 +689,7 @@ static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct inode *inode;
- if (unlikely(ino < F2FS_ROOT_INO(sbi)))
- return ERR_PTR(-ESTALE);
- if (unlikely(ino >= NM_I(sbi)->max_nid))
+ if (check_nid_range(sbi, ino))
return ERR_PTR(-ESTALE);
/*
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 098f97bdcf1b..ca887314aba9 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -643,9 +643,8 @@ struct fuse_copy_state {
unsigned long seglen;
unsigned long addr;
struct page *pg;
- void *mapaddr;
- void *buf;
unsigned len;
+ unsigned offset;
unsigned move_pages:1;
};
@@ -666,23 +665,17 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
if (cs->currbuf) {
struct pipe_buffer *buf = cs->currbuf;
- if (!cs->write) {
- kunmap_atomic(cs->mapaddr);
- } else {
- kunmap_atomic(cs->mapaddr);
+ if (cs->write)
buf->len = PAGE_SIZE - cs->len;
- }
cs->currbuf = NULL;
- cs->mapaddr = NULL;
- } else if (cs->mapaddr) {
- kunmap_atomic(cs->mapaddr);
+ } else if (cs->pg) {
if (cs->write) {
flush_dcache_page(cs->pg);
set_page_dirty_lock(cs->pg);
}
put_page(cs->pg);
- cs->mapaddr = NULL;
}
+ cs->pg = NULL;
}
/*
@@ -691,7 +684,7 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
*/
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
- unsigned long offset;
+ struct page *page;
int err;
unlock_request(cs->fc, cs->req);
@@ -706,14 +699,12 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
BUG_ON(!cs->nr_segs);
cs->currbuf = buf;
- cs->mapaddr = kmap_atomic(buf->page);
+ cs->pg = buf->page;
+ cs->offset = buf->offset;
cs->len = buf->len;
- cs->buf = cs->mapaddr + buf->offset;
cs->pipebufs++;
cs->nr_segs--;
} else {
- struct page *page;
-
if (cs->nr_segs == cs->pipe->buffers)
return -EIO;
@@ -726,8 +717,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
buf->len = 0;
cs->currbuf = buf;
- cs->mapaddr = kmap_atomic(page);
- cs->buf = cs->mapaddr;
+ cs->pg = page;
+ cs->offset = 0;
cs->len = PAGE_SIZE;
cs->pipebufs++;
cs->nr_segs++;
@@ -740,14 +731,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
cs->iov++;
cs->nr_segs--;
}
- err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
+ err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
if (err < 0)
return err;
BUG_ON(err != 1);
- offset = cs->addr % PAGE_SIZE;
- cs->mapaddr = kmap_atomic(cs->pg);
- cs->buf = cs->mapaddr + offset;
- cs->len = min(PAGE_SIZE - offset, cs->seglen);
+ cs->pg = page;
+ cs->offset = cs->addr % PAGE_SIZE;
+ cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
cs->seglen -= cs->len;
cs->addr += cs->len;
}
@@ -760,15 +750,20 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
unsigned ncpy = min(*size, cs->len);
if (val) {
+ void *pgaddr = kmap_atomic(cs->pg);
+ void *buf = pgaddr + cs->offset;
+
if (cs->write)
- memcpy(cs->buf, *val, ncpy);
+ memcpy(buf, *val, ncpy);
else
- memcpy(*val, cs->buf, ncpy);
+ memcpy(*val, buf, ncpy);
+
+ kunmap_atomic(pgaddr);
*val += ncpy;
}
*size -= ncpy;
cs->len -= ncpy;
- cs->buf += ncpy;
+ cs->offset += ncpy;
return ncpy;
}
@@ -874,8 +869,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
out_fallback_unlock:
unlock_page(newpage);
out_fallback:
- cs->mapaddr = kmap_atomic(buf->page);
- cs->buf = cs->mapaddr + buf->offset;
+ cs->pg = buf->page;
+ cs->offset = buf->offset;
err = lock_request(cs->fc, cs->req);
if (err)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 42198359fa1b..0c6048247a34 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -198,7 +198,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
inode = ACCESS_ONCE(entry->d_inode);
if (inode && is_bad_inode(inode))
goto invalid;
- else if (fuse_dentry_time(entry) < get_jiffies_64()) {
+ else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
+ (flags & LOOKUP_REVAL)) {
int err;
struct fuse_entry_out outarg;
struct fuse_req *req;
@@ -814,13 +815,6 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
return err;
}
-static int fuse_rename(struct inode *olddir, struct dentry *oldent,
- struct inode *newdir, struct dentry *newent)
-{
- return fuse_rename_common(olddir, oldent, newdir, newent, 0,
- FUSE_RENAME, sizeof(struct fuse_rename_in));
-}
-
static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
struct inode *newdir, struct dentry *newent,
unsigned int flags)
@@ -831,17 +825,30 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
return -EINVAL;
- if (fc->no_rename2 || fc->minor < 23)
- return -EINVAL;
+ if (flags) {
+ if (fc->no_rename2 || fc->minor < 23)
+ return -EINVAL;
- err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
- FUSE_RENAME2, sizeof(struct fuse_rename2_in));
- if (err == -ENOSYS) {
- fc->no_rename2 = 1;
- err = -EINVAL;
+ err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
+ FUSE_RENAME2,
+ sizeof(struct fuse_rename2_in));
+ if (err == -ENOSYS) {
+ fc->no_rename2 = 1;
+ err = -EINVAL;
+ }
+ } else {
+ err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
+ FUSE_RENAME,
+ sizeof(struct fuse_rename_in));
}
+
return err;
+}
+static int fuse_rename(struct inode *olddir, struct dentry *oldent,
+ struct inode *newdir, struct dentry *newent)
+{
+ return fuse_rename2(olddir, oldent, newdir, newent, 0);
}
static int fuse_link(struct dentry *entry, struct inode *newdir,
@@ -985,7 +992,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
int err;
bool r;
- if (fi->i_time < get_jiffies_64()) {
+ if (time_before64(fi->i_time, get_jiffies_64())) {
r = true;
err = fuse_do_getattr(inode, stat, file);
} else {
@@ -1171,7 +1178,7 @@ static int fuse_permission(struct inode *inode, int mask)
((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
struct fuse_inode *fi = get_fuse_inode(inode);
- if (fi->i_time < get_jiffies_64()) {
+ if (time_before64(fi->i_time, get_jiffies_64())) {
refreshed = true;
err = fuse_perm_getattr(inode, mask);
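
Three fuse hunks above replace a bare "<" on 64-bit jiffies values with
time_before64(). The bare comparison gives the wrong answer once the counter
wraps; the time_*64 helpers compare through a signed subtraction, which stays
correct across the wrap. The conventional shape of that helper, sketched (the
semantics of linux/jiffies.h, not its exact text):

	static inline int before64(u64 a, u64 b)
	{
		return (s64)(a - b) < 0;	/* wrap-safe: signed view of
						 * the unsigned difference */
	}
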
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 6e16dad13e9b..40ac2628ddcf 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1687,7 +1687,7 @@ static int fuse_writepage_locked(struct page *page)
error = -EIO;
req->ff = fuse_write_file_get(fc, fi);
if (!req->ff)
- goto err_free;
+ goto err_nofile;
fuse_write_fill(req, req->ff, page_offset(page), 0);
@@ -1715,6 +1715,8 @@ static int fuse_writepage_locked(struct page *page)
return 0;
+err_nofile:
+ __free_page(tmp_page);
err_free:
fuse_request_free(req);
err:
@@ -1955,8 +1957,8 @@ static int fuse_writepages(struct address_space *mapping,
data.ff = NULL;
err = -ENOMEM;
- data.orig_pages = kzalloc(sizeof(struct page *) *
- FUSE_MAX_PAGES_PER_REQ,
+ data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
+ sizeof(struct page *),
GFP_NOFS);
if (!data.orig_pages)
goto out;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 754dcf23de8a..03246cd9d47a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -478,6 +478,17 @@ static const match_table_t tokens = {
{OPT_ERR, NULL}
};
+static int fuse_match_uint(substring_t *s, unsigned int *res)
+{
+ int err = -ENOMEM;
+ char *buf = match_strdup(s);
+ if (buf) {
+ err = kstrtouint(buf, 10, res);
+ kfree(buf);
+ }
+ return err;
+}
+
static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
{
char *p;
@@ -488,6 +499,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
while ((p = strsep(&opt, ",")) != NULL) {
int token;
int value;
+ unsigned uv;
substring_t args[MAX_OPT_ARGS];
if (!*p)
continue;
@@ -511,18 +523,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
break;
case OPT_USER_ID:
- if (match_int(&args[0], &value))
+ if (fuse_match_uint(&args[0], &uv))
return 0;
- d->user_id = make_kuid(current_user_ns(), value);
+ d->user_id = make_kuid(current_user_ns(), uv);
if (!uid_valid(d->user_id))
return 0;
d->user_id_present = 1;
break;
case OPT_GROUP_ID:
- if (match_int(&args[0], &value))
+ if (fuse_match_uint(&args[0], &uv))
return 0;
- d->group_id = make_kgid(current_user_ns(), value);
+ d->group_id = make_kgid(current_user_ns(), uv);
if (!gid_valid(d->group_id))
return 0;
d->group_id_present = 1;
@@ -895,9 +907,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->writeback_cache = 1;
if (arg->time_gran && arg->time_gran <= 1000000000)
fc->sb->s_time_gran = arg->time_gran;
- else
- fc->sb->s_time_gran = 1000000000;
-
} else {
ra_pages = fc->max_read / PAGE_CACHE_SIZE;
fc->no_lock = 1;
@@ -926,7 +935,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
- FUSE_WRITEBACK_CACHE;
+ FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
@@ -1006,7 +1015,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
- if (!parse_fuse_opt((char *) data, &d, is_bdev))
+ if (!parse_fuse_opt(data, &d, is_bdev))
goto err;
if (is_bdev) {
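
fuse_match_uint() exists because match_int() parses through a signed int, so a
uid or gid above INT_MAX (for example 4294967294, sometimes used for "nobody")
was rejected even though it is a valid 32-bit id. The difference in isolation
(hypothetical standalone calls):

	unsigned int uv;
	int sv;

	kstrtouint("4294967294", 10, &uv);	/* ok: uv == 4294967294 */
	kstrtoint("4294967294", 10, &sv);	/* -ERANGE: exceeds INT_MAX */
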
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4fc3a3046174..26b3f952e6b1 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -981,7 +981,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
int error = 0;
state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
- flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
+ flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
mutex_lock(&fp->f_fl_mutex);
@@ -991,7 +991,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
goto out;
flock_lock_file_wait(file,
&(struct file_lock){.fl_type = F_UNLCK});
- gfs2_glock_dq_wait(fl_gh);
+ gfs2_glock_dq(fl_gh);
gfs2_holder_reinit(state, flags, fl_gh);
} else {
error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c355f7320e44..ee4e04fe60fc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
cachep = gfs2_glock_aspace_cachep;
else
cachep = gfs2_glock_cachep;
- gl = kmem_cache_alloc(cachep, GFP_KERNEL);
+ gl = kmem_cache_alloc(cachep, GFP_NOFS);
if (!gl)
return -ENOMEM;
memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
if (glops->go_flags & GLOF_LVB) {
- gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+ gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
if (!gl->gl_lksb.sb_lvbptr) {
kmem_cache_free(cachep, gl);
return -ENOMEM;
@@ -1404,12 +1404,16 @@ __acquires(&lru_lock)
gl = list_entry(list->next, struct gfs2_glock, gl_lru);
list_del_init(&gl->gl_lru);
if (!spin_trylock(&gl->gl_spin)) {
+add_back_to_lru:
list_add(&gl->gl_lru, &lru_list);
atomic_inc(&lru_count);
continue;
}
+ if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+ spin_unlock(&gl->gl_spin);
+ goto add_back_to_lru;
+ }
clear_bit(GLF_LRU, &gl->gl_flags);
- spin_unlock(&lru_lock);
gl->gl_lockref.count++;
if (demote_ok(gl))
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1417,7 +1421,7 @@ __acquires(&lru_lock)
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gl->gl_lockref.count--;
spin_unlock(&gl->gl_spin);
- spin_lock(&lru_lock);
+ cond_resched_lock(&lru_lock);
}
}
@@ -1442,7 +1446,7 @@ static long gfs2_scan_glock_lru(int nr)
gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
/* Test for being demotable */
- if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+ if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
list_move(&gl->gl_lru, &dispose);
atomic_dec(&lru_count);
freed++;
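
The allocation-flag change in the first gfs2/glock.c hunk is the usual reclaim
deadlock avoidance: GFP_KERNEL may enter direct reclaim and re-enter the
filesystem, which can then block on the very glocks being set up, while GFP_NOFS
keeps reclaim out of filesystem code. As a pattern (sketch):

	/* allocation on a path already entangled with fs-level locks */
	gl = kmem_cache_alloc(cachep, GFP_NOFS);	/* reclaim may run, but
							 * won't recurse into
							 * the filesystem */
	if (!gl)
		return -ENOMEM;
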
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index fc1100781bbc..2ffc67dce87f 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -234,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
* inode_go_inval - prepare an inode glock to be released
* @gl: the glock
* @flags:
- *
- * Normally we invlidate everything, but if we are moving into
+ *
+ * Normally we invalidate everything, but if we are moving into
* LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
* can keep hold of the metadata, since it won't have changed.
*
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 91f274de1246..4fafea1c9ecf 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1036,8 +1036,8 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
new_size = old_size + RECOVER_SIZE_INC;
- submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
- result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
+ submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
+ result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
if (!submit || !result) {
kfree(submit);
kfree(result);
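
kcalloc(new_size, sizeof(uint32_t), GFP_NOFS) differs from the kzalloc() it
replaces in one way that matters here: the multiplication is checked for overflow
before allocating, so an oversized count fails cleanly instead of yielding a
too-short buffer. In spirit (a sketch of the guard, not the kernel's exact
implementation):

	static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
	{
		if (size != 0 && n > SIZE_MAX / size)
			return NULL;		/* n * size would wrap */
		return kzalloc(n * size, flags);
	}
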
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index db629d1bd1bd..f4cb9c0d6bbd 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -337,7 +337,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
/**
* gfs2_free_extlen - Return extent length of free blocks
- * @rbm: Starting position
+ * @rrbm: Starting position
* @len: Max length to check
*
* Starting at the block specified by the rbm, see how many free blocks
@@ -2522,7 +2522,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
/**
* gfs2_rlist_free - free a resource group list
- * @list: the list of resource groups
+ * @rlist: the list of resource groups
*
*/
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 38cfcf5f6fce..6f0f590cc5a3 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1588,9 +1588,12 @@ int jbd2_journal_stop(handle_t *handle)
* to perform a synchronous write. We do this to detect the
* case where a single process is doing a stream of sync
* writes. No point in waiting for joiners in that case.
+ *
+ * Setting max_batch_time to 0 disables this completely.
*/
pid = current->pid;
- if (handle->h_sync && journal->j_last_sync_writer != pid) {
+ if (handle->h_sync && journal->j_last_sync_writer != pid &&
+ journal->j_max_batch_time) {
u64 commit_time, trans_time;
journal->j_last_sync_writer = pid;
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index d171b98a6cdd..f973ae9b05f1 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -211,6 +211,36 @@ void kernfs_kill_sb(struct super_block *sb)
kernfs_put(root_kn);
}
+/**
+ * kernfs_pin_sb: try to pin the superblock associated with a kernfs_root
+ * @root: the kernfs_root in question
+ * @ns: the namespace tag
+ *
+ * Pin the superblock so it won't be destroyed in subsequent operations.
+ * This can be used to block ->kill_sb(), which may be useful for kernfs
+ * users that dynamically manage superblocks.
+ *
+ * Returns NULL if there's no superblock associated with this kernfs_root,
+ * or ERR_PTR(-EINVAL) if the superblock is being freed.
+ */
+struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns)
+{
+ struct kernfs_super_info *info;
+ struct super_block *sb = NULL;
+
+ mutex_lock(&kernfs_mutex);
+ list_for_each_entry(info, &root->supers, node) {
+ if (info->ns == ns) {
+ sb = info->sb;
+ if (!atomic_inc_not_zero(&info->sb->s_active))
+ sb = ERR_PTR(-EINVAL);
+ break;
+ }
+ }
+ mutex_unlock(&kernfs_mutex);
+ return sb;
+}
+
void __init kernfs_init(void)
{
kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
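
A caller that gets a non-NULL, non-error superblock back from kernfs_pin_sb()
owns an elevated s_active reference and must drop it when done. A hedged usage
sketch (the surrounding caller is invented):

	struct super_block *sb;

	sb = kernfs_pin_sb(root, NULL);
	if (IS_ERR_OR_NULL(sb))
		return;			/* nothing mounted, or being freed */

	/* ... ->kill_sb() cannot complete while the pin is held ... */

	deactivate_super(sb);		/* release the pinned reference */
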
diff --git a/fs/namei.c b/fs/namei.c
index 985c6f368485..9eb787e5c167 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2256,9 +2256,10 @@ done:
goto out;
}
path->dentry = dentry;
- path->mnt = mntget(nd->path.mnt);
+ path->mnt = nd->path.mnt;
if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
return 1;
+ mntget(path->mnt);
follow_mount(path);
error = 0;
out:
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 8f98138cbc43..f11b9eed0de1 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -756,7 +756,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
spin_unlock(&dreq->lock);
while (!list_empty(&hdr->pages)) {
- bool do_destroy = true;
req = nfs_list_entry(hdr->pages.next);
nfs_list_remove_request(req);
@@ -765,7 +764,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
case NFS_IOHDR_NEED_COMMIT:
kref_get(&req->wb_kref);
nfs_mark_request_commit(req, hdr->lseg, &cinfo);
- do_destroy = false;
}
nfs_unlock_and_release_request(req);
}
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 82ddbf46660e..f415cbf9f6c3 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -244,6 +244,7 @@ void nfs_pgio_data_release(struct nfs_pgio_data *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *,
const struct rpc_call_ops *, int, int);
+void nfs_free_request(struct nfs_page *req);
static inline void nfs_iocounter_init(struct nfs_io_counter *c)
{
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 871d6eda8dba..8f854dde4150 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -247,3 +247,46 @@ const struct xattr_handler *nfs3_xattr_handlers[] = {
&posix_acl_default_xattr_handler,
NULL,
};
+
+static int
+nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
+ size_t size, ssize_t *result)
+{
+ struct posix_acl *acl;
+ char *p = data + *result;
+
+ acl = get_acl(inode, type);
+ if (!acl)
+ return 0;
+
+ posix_acl_release(acl);
+
+ *result += strlen(name);
+ *result += 1;
+ if (!size)
+ return 0;
+ if (*result > size)
+ return -ERANGE;
+
+ strcpy(p, name);
+ return 0;
+}
+
+ssize_t
+nfs3_listxattr(struct dentry *dentry, char *data, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ ssize_t result = 0;
+ int error;
+
+ error = nfs3_list_one_acl(inode, ACL_TYPE_ACCESS,
+ POSIX_ACL_XATTR_ACCESS, data, size, &result);
+ if (error)
+ return error;
+
+ error = nfs3_list_one_acl(inode, ACL_TYPE_DEFAULT,
+ POSIX_ACL_XATTR_DEFAULT, data, size, &result);
+ if (error)
+ return error;
+ return result;
+}
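
nfs3_listxattr() implements the standard listxattr contract: with size == 0 it
only accumulates name lengths (each name plus its terminating NUL), and with a
real buffer it copies the names, returning -ERANGE if they no longer fit. From
the caller's side that is the usual two-pass idiom (userspace sketch, path and
retry handling illustrative):

	#include <stdlib.h>
	#include <sys/xattr.h>

	static void list_names(const char *path)
	{
		ssize_t need = listxattr(path, NULL, 0);   /* pass 1: size */
		char *names = need > 0 ? malloc(need) : NULL;

		if (names && listxattr(path, names, need) < 0)
			;	/* raced with a new xattr: redo pass 1 */
		free(names);
	}
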
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index e7daa42bbc86..f0afa291fd58 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -885,7 +885,7 @@ static const struct inode_operations nfs3_dir_inode_operations = {
.getattr = nfs_getattr,
.setattr = nfs_setattr,
#ifdef CONFIG_NFS_V3_ACL
- .listxattr = generic_listxattr,
+ .listxattr = nfs3_listxattr,
.getxattr = generic_getxattr,
.setxattr = generic_setxattr,
.removexattr = generic_removexattr,
@@ -899,7 +899,7 @@ static const struct inode_operations nfs3_file_inode_operations = {
.getattr = nfs_getattr,
.setattr = nfs_setattr,
#ifdef CONFIG_NFS_V3_ACL
- .listxattr = generic_listxattr,
+ .listxattr = nfs3_listxattr,
.getxattr = generic_getxattr,
.setxattr = generic_setxattr,
.removexattr = generic_removexattr,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b6ee3a6ee96d..17fab89f6358 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -29,8 +29,6 @@
static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;
-static void nfs_free_request(struct nfs_page *);
-
static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
p->npages = pagecount;
@@ -239,20 +237,28 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
WARN_ON_ONCE(prev == req);
if (!prev) {
+ /* a head request */
req->wb_head = req;
req->wb_this_page = req;
} else {
+ /* a subrequest */
WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
req->wb_head = prev->wb_head;
req->wb_this_page = prev->wb_this_page;
prev->wb_this_page = req;
+ /* All subrequests take a ref on the head request until
+ * nfs_page_group_destroy is called */
+ kref_get(&req->wb_head->wb_kref);
+
/* grab extra ref if head request has extra ref from
* the write/commit path to handle handoff between write
* and commit lists */
- if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags))
+ if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
+ set_bit(PG_INODE_REF, &req->wb_flags);
kref_get(&req->wb_kref);
+ }
}
}
@@ -269,6 +275,10 @@ nfs_page_group_destroy(struct kref *kref)
struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
struct nfs_page *tmp, *next;
+ /* subrequests must release the ref on the head request */
+ if (req->wb_head != req)
+ nfs_release_request(req->wb_head);
+
if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
return;
@@ -394,7 +404,7 @@ static void nfs_clear_request(struct nfs_page *req)
*
* Note: Should never be called with the spinlock held!
*/
-static void nfs_free_request(struct nfs_page *req)
+void nfs_free_request(struct nfs_page *req)
{
WARN_ON_ONCE(req->wb_this_page != req);
@@ -925,7 +935,6 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
nfs_pageio_doio(desc);
if (desc->pg_error < 0)
return 0;
- desc->pg_moreio = 0;
if (desc->pg_recoalesce)
return 0;
/* retry add_request for this subreq */
@@ -972,6 +981,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
desc->pg_count = 0;
desc->pg_base = 0;
desc->pg_recoalesce = 0;
+ desc->pg_moreio = 0;
while (!list_empty(&head)) {
struct nfs_page *req;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 98ff061ccaf3..5e2f10304548 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -46,6 +46,7 @@ static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
+static void nfs_clear_request_commit(struct nfs_page *req);
static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
@@ -91,8 +92,15 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}
+/*
+ * nfs_page_find_head_request_locked - find head request associated with @page
+ *
+ * must be called while holding the inode lock.
+ *
+ * returns matching head request with reference held, or NULL if not found.
+ */
static struct nfs_page *
-nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
+nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
{
struct nfs_page *req = NULL;
@@ -104,25 +112,33 @@ nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
/* Linearly search the commit list for the correct req */
list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
if (freq->wb_page == page) {
- req = freq;
+ req = freq->wb_head;
break;
}
}
}
- if (req)
+ if (req) {
+ WARN_ON_ONCE(req->wb_head != req);
+
kref_get(&req->wb_kref);
+ }
return req;
}
-static struct nfs_page *nfs_page_find_request(struct page *page)
+/*
+ * nfs_page_find_head_request - find head request associated with @page
+ *
+ * returns matching head request with reference held, or NULL if not found.
+ */
+static struct nfs_page *nfs_page_find_head_request(struct page *page)
{
struct inode *inode = page_file_mapping(page)->host;
struct nfs_page *req = NULL;
spin_lock(&inode->i_lock);
- req = nfs_page_find_request_locked(NFS_I(inode), page);
+ req = nfs_page_find_head_request_locked(NFS_I(inode), page);
spin_unlock(&inode->i_lock);
return req;
}
@@ -274,36 +290,246 @@ static void nfs_end_page_writeback(struct nfs_page *req)
clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}
-static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
+
+/* nfs_page_group_clear_bits
+ * @req - an nfs request
+ * clears all page group related bits from @req
+ */
+static void
+nfs_page_group_clear_bits(struct nfs_page *req)
+{
+ clear_bit(PG_TEARDOWN, &req->wb_flags);
+ clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
+ clear_bit(PG_UPTODATE, &req->wb_flags);
+ clear_bit(PG_WB_END, &req->wb_flags);
+ clear_bit(PG_REMOVE, &req->wb_flags);
+}
+
+
+/*
+ * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
+ *
+ * this is a helper function for nfs_lock_and_join_requests
+ *
+ * @inode - inode associated with request page group, must be holding inode lock
+ * @head - head request of page group, must be holding head lock
+ * @req - request that couldn't lock and needs to wait on the req bit lock
+ * @nonblock - if true, don't actually wait
+ *
+ * NOTE: this must be called while holding the page_group bit lock and the
+ * inode spin lock, and BOTH will be released before returning.
+ *
+ * returns 0 on success, < 0 on error.
+ */
+static int
+nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
+ struct nfs_page *req, bool nonblock)
+ __releases(&inode->i_lock)
+{
+ struct nfs_page *tmp;
+ int ret;
+
+ /* relinquish all the locks successfully grabbed this run */
+ for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
+ nfs_unlock_request(tmp);
+
+ WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
+
+ /* grab a ref on the request that will be waited on */
+ kref_get(&req->wb_kref);
+
+ nfs_page_group_unlock(head);
+ spin_unlock(&inode->i_lock);
+
+ /* release ref from nfs_page_find_head_request_locked */
+ nfs_release_request(head);
+
+ if (!nonblock)
+ ret = nfs_wait_on_request(req);
+ else
+ ret = -EAGAIN;
+ nfs_release_request(req);
+
+ return ret;
+}
+
+/*
+ * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
+ *
+ * @destroy_list - request list (using wb_this_page) terminated by @old_head
+ * @old_head - the old head of the list
+ *
+ * All subrequests must be locked and removed from all lists, so at this point
+ * they are only "active" in this function, and possibly in nfs_wait_on_request
+ * with a reference held by some other context.
+ */
+static void
+nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
+ struct nfs_page *old_head)
+{
+ while (destroy_list) {
+ struct nfs_page *subreq = destroy_list;
+
+ destroy_list = (subreq->wb_this_page == old_head) ?
+ NULL : subreq->wb_this_page;
+
+ WARN_ON_ONCE(old_head != subreq->wb_head);
+
+ /* make sure old group is not used */
+ subreq->wb_head = subreq;
+ subreq->wb_this_page = subreq;
+
+ nfs_clear_request_commit(subreq);
+
+ /* subreq is now totally disconnected from page group or any
+ * write / commit lists. last chance to wake any waiters */
+ nfs_unlock_request(subreq);
+
+ if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
+ /* release ref on old head request */
+ nfs_release_request(old_head);
+
+ nfs_page_group_clear_bits(subreq);
+
+ /* release the PG_INODE_REF reference */
+ if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
+ nfs_release_request(subreq);
+ else
+ WARN_ON_ONCE(1);
+ } else {
+ WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
+ /* zombie requests have already released the last
+ * reference and were waiting on the rest of the
+ * group to complete. Since it's no longer part of a
+ * group, simply free the request */
+ nfs_page_group_clear_bits(subreq);
+ nfs_free_request(subreq);
+ }
+ }
+}
+
+/*
+ * nfs_lock_and_join_requests - join all subreqs to the head req and return
+ * a locked reference, cancelling any pending
+ * operations for this page.
+ *
+ * @page - the page used to lookup the "page group" of nfs_page structures
+ * @nonblock - if true, don't block waiting for request locks
+ *
+ * This function joins all sub requests to the head request by first
+ * locking all requests in the group, cancelling any pending operations
+ * and finally updating the head request to cover the whole range covered by
+ * the (former) group. All subrequests are removed from any write or commit
+ * lists, unlinked from the group and destroyed.
+ *
+ * Returns a locked, referenced pointer to the head request - which after
+ * this call is guaranteed to be the only request associated with the page.
+ * Returns NULL if no requests are found for @page, or an ERR_PTR if an
+ * error was encountered.
+ */
+static struct nfs_page *
+nfs_lock_and_join_requests(struct page *page, bool nonblock)
{
struct inode *inode = page_file_mapping(page)->host;
- struct nfs_page *req;
+ struct nfs_page *head, *subreq;
+ struct nfs_page *destroy_list = NULL;
+ unsigned int total_bytes;
int ret;
+try_again:
+ total_bytes = 0;
+
+ WARN_ON_ONCE(destroy_list);
+
spin_lock(&inode->i_lock);
- for (;;) {
- req = nfs_page_find_request_locked(NFS_I(inode), page);
- if (req == NULL)
- break;
- if (nfs_lock_request(req))
- break;
- /* Note: If we hold the page lock, as is the case in nfs_writepage,
- * then the call to nfs_lock_request() will always
- * succeed provided that someone hasn't already marked the
- * request as dirty (in which case we don't care).
- */
+
+ /*
+ * A reference is taken only on the head request which acts as a
+ * reference to the whole page group - the group will not be destroyed
+ * until the head reference is released.
+ */
+ head = nfs_page_find_head_request_locked(NFS_I(inode), page);
+
+ if (!head) {
spin_unlock(&inode->i_lock);
- if (!nonblock)
- ret = nfs_wait_on_request(req);
- else
- ret = -EAGAIN;
- nfs_release_request(req);
- if (ret != 0)
+ return NULL;
+ }
+
+ /* lock each request in the page group */
+ nfs_page_group_lock(head);
+ subreq = head;
+ do {
+ /*
+ * Subrequests are always contiguous, non overlapping
+ * and in order. If not, it's a programming error.
+ */
+ WARN_ON_ONCE(subreq->wb_offset !=
+ (head->wb_offset + total_bytes));
+
+ /* keep track of how many bytes this group covers */
+ total_bytes += subreq->wb_bytes;
+
+ if (!nfs_lock_request(subreq)) {
+ /* releases page group bit lock and
+ * inode spin lock and all references */
+ ret = nfs_unroll_locks_and_wait(inode, head,
+ subreq, nonblock);
+
+ if (ret == 0)
+ goto try_again;
+
return ERR_PTR(ret);
- spin_lock(&inode->i_lock);
+ }
+
+ subreq = subreq->wb_this_page;
+ } while (subreq != head);
+
+ /* Now that all requests are locked, make sure they aren't on any list.
+ * Commit list removal accounting is done after locks are dropped */
+ subreq = head;
+ do {
+ nfs_list_remove_request(subreq);
+ subreq = subreq->wb_this_page;
+ } while (subreq != head);
+
+ /* unlink subrequests from head, destroy them later */
+ if (head->wb_this_page != head) {
+ /* destroy list will be terminated by head */
+ destroy_list = head->wb_this_page;
+ head->wb_this_page = head;
+
+ /* change head request to cover whole range that
+ * the former page group covered */
+ head->wb_bytes = total_bytes;
}
+
+ /*
+ * prepare head request to be added to new pgio descriptor
+ */
+ nfs_page_group_clear_bits(head);
+
+ /*
+ * some part of the group was still on the inode list - otherwise
+ * the group wouldn't be involved in async write.
+ * grab a reference for the head request, iff it needs one.
+ */
+ if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
+ kref_get(&head->wb_kref);
+
+ nfs_page_group_unlock(head);
+
+ /* drop lock to clear_request_commit the head req and clean up
+ * requests on destroy list */
spin_unlock(&inode->i_lock);
- return req;
+
+ nfs_destroy_unlinked_subrequests(destroy_list, head);
+
+ /* clean up commit list state */
+ nfs_clear_request_commit(head);
+
+ /* still holds ref on head from nfs_page_find_head_request_locked
+ * and still has lock on head from lock loop */
+ return head;
}
/*
@@ -316,7 +542,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req;
int ret = 0;
- req = nfs_find_and_lock_request(page, nonblock);
+ req = nfs_lock_and_join_requests(page, nonblock);
if (!req)
goto out;
ret = PTR_ERR(req);
@@ -448,7 +674,9 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
set_page_private(req->wb_page, (unsigned long)req);
}
nfsi->npages++;
- set_bit(PG_INODE_REF, &req->wb_flags);
+ /* this is a head request for a page group - mark it as having an
+ * extra reference so sub groups can follow suit */
+ WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
kref_get(&req->wb_kref);
spin_unlock(&inode->i_lock);
}
@@ -474,7 +702,9 @@ static void nfs_inode_remove_request(struct nfs_page *req)
nfsi->npages--;
spin_unlock(&inode->i_lock);
}
- nfs_release_request(req);
+
+ if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
+ nfs_release_request(req);
}
static void
@@ -638,7 +868,6 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
struct nfs_commit_info cinfo;
unsigned long bytes = 0;
- bool do_destroy;
if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
goto out;
@@ -668,7 +897,6 @@ remove_req:
next:
nfs_unlock_request(req);
nfs_end_page_writeback(req);
- do_destroy = !test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags);
nfs_release_request(req);
}
out:
@@ -769,7 +997,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
spin_lock(&inode->i_lock);
for (;;) {
- req = nfs_page_find_request_locked(NFS_I(inode), page);
+ req = nfs_page_find_head_request_locked(NFS_I(inode), page);
if (req == NULL)
goto out_unlock;
@@ -877,7 +1105,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
* dropped page.
*/
do {
- req = nfs_page_find_request(page);
+ req = nfs_page_find_head_request(page);
if (req == NULL)
return 0;
l_ctx = req->wb_lock_context;
@@ -1569,27 +1797,28 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
struct nfs_page *req;
int ret = 0;
- for (;;) {
- wait_on_page_writeback(page);
- req = nfs_page_find_request(page);
- if (req == NULL)
- break;
- if (nfs_lock_request(req)) {
- nfs_clear_request_commit(req);
- nfs_inode_remove_request(req);
- /*
- * In case nfs_inode_remove_request has marked the
- * page as being dirty
- */
- cancel_dirty_page(page, PAGE_CACHE_SIZE);
- nfs_unlock_and_release_request(req);
- break;
- }
- ret = nfs_wait_on_request(req);
- nfs_release_request(req);
- if (ret < 0)
- break;
+ wait_on_page_writeback(page);
+
+ /* blocking call to cancel all requests and join to a single (head)
+ * request */
+ req = nfs_lock_and_join_requests(page, false);
+
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ } else if (req) {
+ /* all requests from this page have been cancelled by
+ * nfs_lock_and_join_requests, so just remove the head
+ * request from the inode / page_private pointer and
+ * release it */
+ nfs_inode_remove_request(req);
+ /*
+ * In case nfs_inode_remove_request has marked the
+ * page as being dirty
+ */
+ cancel_dirty_page(page, PAGE_CACHE_SIZE);
+ nfs_unlock_and_release_request(req);
}
+
return ret;
}
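
The PG_INODE_REF handling above, together with nfs_inode_add_request and nfs_inode_remove_request below, forms a take-once/drop-once pair around the head request's extra reference. A minimal userspace sketch of that idiom (illustrative names, C11 atomics; not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

struct req {
	atomic_bool has_extra_ref;	/* stands in for PG_INODE_REF */
	atomic_int refcount;		/* stands in for wb_kref */
};

/* Take the extra reference at most once, however many times this runs. */
static void req_pin(struct req *r)
{
	if (!atomic_exchange(&r->has_extra_ref, true))
		atomic_fetch_add(&r->refcount, 1);
}

/* Drop it only if it is actually held, so pin/unpin always balance. */
static void req_unpin(struct req *r)
{
	if (atomic_exchange(&r->has_extra_ref, false))
		atomic_fetch_sub(&r->refcount, 1);
}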
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 2fc7abebeb9b..944275c8f56d 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2641,7 +2641,7 @@ nfsd4_encode_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
{
__be32 *p;
- p = xdr_reserve_space(xdr, 6);
+ p = xdr_reserve_space(xdr, 20);
if (!p)
return NULL;
*p++ = htonl(2);
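
The bump from 6 to 20 matches the bytes this encoder goes on to emit, assuming the usual rdattr_error layout of five 32-bit XDR words:

/* assumed layout behind xdr_reserve_space(xdr, 20):
 *   4 bytes  bitmap word count (the htonl(2) above)
 *   8 bytes  two bitmap words
 *   4 bytes  attribute-data length
 *   4 bytes  rdattr_error value
 *  = 20 bytes; the old value of 6 was not even a whole number of words
 */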
@@ -2879,6 +2879,7 @@ again:
* return the conflicting open:
*/
if (conf->len) {
+ kfree(conf->data);
conf->len = 0;
conf->data = NULL;
goto again;
@@ -2891,6 +2892,7 @@ again:
if (conf->len) {
p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
p = xdr_encode_opaque(p, conf->data, conf->len);
+ kfree(conf->data);
} else { /* non - nfsv4 lock in conflict, no clientid nor owner */
p = xdr_encode_hyper(p, (u64)0); /* clientid */
*p++ = cpu_to_be32(0); /* length of owner name */
@@ -2907,7 +2909,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
else if (nfserr == nfserr_denied)
nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
- kfree(lock->lk_denied.ld_owner.data);
+
return nfserr;
}
diff --git a/fs/open.c b/fs/open.c
index 36662d036237..d6fd3acde134 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -263,11 +263,10 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
return -EPERM;
/*
- * We can not allow to do any fallocate operation on an active
- * swapfile
+ * We cannot allow any fallocate operation on an active swapfile
*/
if (IS_SWAPFILE(inode))
- ret = -ETXTBSY;
+ return -ETXTBSY;
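
For contrast, the pattern being removed only recorded the error and fell through, so the fallocate ran against the swapfile anyway (sketch):

/* before: if (IS_SWAPFILE(inode))
 *                 ret = -ETXTBSY;   -- noted, then execution continued
 * after:  return -ETXTBSY;          -- caller sees the error, nothing runs
 */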
/*
* Revalidate the write permissions, in case security policy has
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 9cd5f63715c0..7f30bdc57d13 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
struct dquot *dquot;
unsigned long freed = 0;
+ spin_lock(&dq_list_lock);
head = free_dquots.prev;
while (head != &free_dquots && sc->nr_to_scan) {
dquot = list_entry(head, struct dquot, dq_free);
@@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
freed++;
head = free_dquots.prev;
}
+ spin_unlock(&dq_list_lock);
return freed;
}
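
The rationale is presumably that free_dquots can be modified concurrently (dqput() and friends add and remove entries), so the scan must hold dq_list_lock for its whole traversal. Condensed shape of the fixed function:

spin_lock(&dq_list_lock);
head = free_dquots.prev;
while (head != &free_dquots && sc->nr_to_scan) {
	dquot = list_entry(head, struct dquot, dq_free);
	/* prune one entry, count it in freed, consume nr_to_scan */
	head = free_dquots.prev;
}
spin_unlock(&dq_list_lock);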
diff --git a/fs/xattr.c b/fs/xattr.c
index 3377dff18404..c69e6d43a0d2 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -843,7 +843,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
/* wrap around? */
len = sizeof(*new_xattr) + size;
- if (len <= sizeof(*new_xattr))
+ if (len < sizeof(*new_xattr))
return NULL;
new_xattr = kmalloc(len, GFP_KERNEL);
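
A worked check of the relaxed comparison (size_t arithmetic):

/* len = sizeof(*new_xattr) + size
 *   size == 0        -> len == sizeof(*new_xattr): a legal empty value,
 *                       which "<=" wrongly rejected
 *   size == SIZE_MAX -> len wraps to sizeof(*new_xattr) - 1, which is
 *                       < sizeof(*new_xattr): still caught by "<"
 */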
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 96175df211b1..75c3fe5f3d9d 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4298,8 +4298,8 @@ xfs_bmapi_delay(
}
-int
-__xfs_bmapi_allocate(
+static int
+xfs_bmapi_allocate(
struct xfs_bmalloca *bma)
{
struct xfs_mount *mp = bma->ip->i_mount;
@@ -4578,9 +4578,6 @@ xfs_bmapi_write(
bma.flist = flist;
bma.firstblock = firstblock;
- if (flags & XFS_BMAPI_STACK_SWITCH)
- bma.stack_switch = 1;
-
while (bno < end && n < *nmap) {
inhole = eof || bma.got.br_startoff > bno;
wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 38ba36e9b2f0..b879ca56a64c 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -77,7 +77,6 @@ typedef struct xfs_bmap_free
* from written to unwritten, otherwise convert from unwritten to written.
*/
#define XFS_BMAPI_CONVERT 0x040
-#define XFS_BMAPI_STACK_SWITCH 0x080
#define XFS_BMAPI_FLAGS \
{ XFS_BMAPI_ENTIRE, "ENTIRE" }, \
@@ -86,8 +85,7 @@ typedef struct xfs_bmap_free
{ XFS_BMAPI_PREALLOC, "PREALLOC" }, \
{ XFS_BMAPI_IGSTATE, "IGSTATE" }, \
{ XFS_BMAPI_CONTIG, "CONTIG" }, \
- { XFS_BMAPI_CONVERT, "CONVERT" }, \
- { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
+ { XFS_BMAPI_CONVERT, "CONVERT" }
static inline int xfs_bmapi_aflag(int w)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 703b3ec1796c..64731ef3324d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -249,59 +249,6 @@ xfs_bmap_rtalloc(
}
/*
- * Stack switching interfaces for allocation
- */
-static void
-xfs_bmapi_allocate_worker(
- struct work_struct *work)
-{
- struct xfs_bmalloca *args = container_of(work,
- struct xfs_bmalloca, work);
- unsigned long pflags;
- unsigned long new_pflags = PF_FSTRANS;
-
- /*
- * we are in a transaction context here, but may also be doing work
- * in kswapd context, and hence we may need to inherit that state
- * temporarily to ensure that we don't block waiting for memory reclaim
- * in any way.
- */
- if (args->kswapd)
- new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
-
- current_set_flags_nested(&pflags, new_pflags);
-
- args->result = __xfs_bmapi_allocate(args);
- complete(args->done);
-
- current_restore_flags_nested(&pflags, new_pflags);
-}
-
-/*
- * Some allocation requests often come in with little stack to work on. Push
- * them off to a worker thread so there is lots of stack to use. Otherwise just
- * call directly to avoid the context switch overhead here.
- */
-int
-xfs_bmapi_allocate(
- struct xfs_bmalloca *args)
-{
- DECLARE_COMPLETION_ONSTACK(done);
-
- if (!args->stack_switch)
- return __xfs_bmapi_allocate(args);
-
-
- args->done = &done;
- args->kswapd = current_is_kswapd();
- INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
- queue_work(xfs_alloc_wq, &args->work);
- wait_for_completion(&done);
- destroy_work_on_stack(&args->work);
- return args->result;
-}
-
-/*
* Check if the endoff is outside the last extent. If so the caller will grow
* the allocation to a stripe unit boundary. All offsets are considered outside
* the end of file for an empty fork, so 1 is returned in *eof in that case.
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 075f72232a64..2fdb72d2c908 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -55,8 +55,6 @@ struct xfs_bmalloca {
bool userdata;/* set if is user data */
bool aeof; /* allocated space at eof */
bool conv; /* overwriting unwritten extents */
- bool stack_switch;
- bool kswapd; /* allocation in kswapd context */
int flags;
struct completion *done;
struct work_struct work;
@@ -66,8 +64,6 @@ struct xfs_bmalloca {
int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
int *committed);
int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
-int xfs_bmapi_allocate(struct xfs_bmalloca *args);
-int __xfs_bmapi_allocate(struct xfs_bmalloca *args);
int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
int whichfork, int *eof);
int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index bf810c6baf2b..cf893bc1e373 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -33,6 +33,7 @@
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
+#include "xfs_alloc.h"
/*
* Cursor allocation zone.
@@ -2323,7 +2324,7 @@ error1:
* record (to be inserted into parent).
*/
STATIC int /* error */
-xfs_btree_split(
+__xfs_btree_split(
struct xfs_btree_cur *cur,
int level,
union xfs_btree_ptr *ptrp,
@@ -2503,6 +2504,85 @@ error0:
return error;
}
+struct xfs_btree_split_args {
+ struct xfs_btree_cur *cur;
+ int level;
+ union xfs_btree_ptr *ptrp;
+ union xfs_btree_key *key;
+ struct xfs_btree_cur **curp;
+ int *stat; /* success/failure */
+ int result;
+ bool kswapd; /* allocation in kswapd context */
+ struct completion *done;
+ struct work_struct work;
+};
+
+/*
+ * Stack switching interfaces for allocation
+ */
+static void
+xfs_btree_split_worker(
+ struct work_struct *work)
+{
+ struct xfs_btree_split_args *args = container_of(work,
+ struct xfs_btree_split_args, work);
+ unsigned long pflags;
+ unsigned long new_pflags = PF_FSTRANS;
+
+ /*
+ * we are in a transaction context here, but may also be doing work
+ * in kswapd context, and hence we may need to inherit that state
+ * temporarily to ensure that we don't block waiting for memory reclaim
+ * in any way.
+ */
+ if (args->kswapd)
+ new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+
+ current_set_flags_nested(&pflags, new_pflags);
+
+ args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
+ args->key, args->curp, args->stat);
+ complete(args->done);
+
+ current_restore_flags_nested(&pflags, new_pflags);
+}
+
+/*
+ * BMBT split requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. For the other
+ * btree types, just call directly to avoid the context switch overhead here.
+ */
+STATIC int /* error */
+xfs_btree_split(
+ struct xfs_btree_cur *cur,
+ int level,
+ union xfs_btree_ptr *ptrp,
+ union xfs_btree_key *key,
+ struct xfs_btree_cur **curp,
+ int *stat) /* success/failure */
+{
+ struct xfs_btree_split_args args;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ if (cur->bc_btnum != XFS_BTNUM_BMAP)
+ return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
+
+ args.cur = cur;
+ args.level = level;
+ args.ptrp = ptrp;
+ args.key = key;
+ args.curp = curp;
+ args.stat = stat;
+ args.done = &done;
+ args.kswapd = current_is_kswapd();
+ INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
+ queue_work(xfs_alloc_wq, &args.work);
+ wait_for_completion(&done);
+ destroy_work_on_stack(&args.work);
+ return args.result;
+}
+
+
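
The on-stack work pattern used above, reduced to its skeleton (a sketch with illustrative names, not the XFS code): run the stack-hungry step synchronously on a workqueue so it executes with a fresh worker stack.

#include <linux/completion.h>
#include <linux/workqueue.h>

struct deep_args {
	int result;
	struct completion *done;
	struct work_struct work;
};

static void deep_worker(struct work_struct *work)
{
	struct deep_args *args = container_of(work, struct deep_args, work);

	args->result = 0;	/* the stack-hungry work goes here */
	complete(args->done);
}

static int run_with_fresh_stack(struct workqueue_struct *wq)
{
	struct deep_args args;
	DECLARE_COMPLETION_ONSTACK(done);

	args.done = &done;
	INIT_WORK_ONSTACK(&args.work, deep_worker);
	queue_work(wq, &args.work);
	wait_for_completion(&done);	/* synchronous: caller blocks here */
	destroy_work_on_stack(&args.work);
	return args.result;
}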
/*
* Copy the old inode root contents into a real block and make the
* broot point to it.
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 6c5eb4c551e3..6d3ec2b6ee29 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -749,8 +749,7 @@ xfs_iomap_write_allocate(
* pointer that the caller gave to us.
*/
error = xfs_bmapi_write(tp, ip, map_start_fsb,
- count_fsb,
- XFS_BMAPI_STACK_SWITCH,
+ count_fsb, 0,
&first_block, 1,
imap, &nimaps, &free_list);
if (error)
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index c3453b11f563..7703fa6770ff 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -483,10 +483,16 @@ xfs_sb_quota_to_disk(
}
/*
- * GQUOTINO and PQUOTINO cannot be used together in versions
- * of superblock that do not have pquotino. from->sb_flags
- * tells us which quota is active and should be copied to
- * disk.
+ * GQUOTINO and PQUOTINO cannot be used together in versions of
+ * superblock that do not have pquotino. from->sb_flags tells us which
+ * quota is active and should be copied to disk. If neither are active,
+ * make sure we write NULLFSINO to the sb_gquotino field as a quota
+ * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
+ * bit is set.
+ *
+ * Note that we don't need to handle the sb_uquotino or sb_pquotino here
+ * as they do not require any translation. Hence the main sb field loop
+ * will write them appropriately from the in-core superblock.
*/
if ((*fields & XFS_SB_GQUOTINO) &&
(from->sb_qflags & XFS_GQUOTA_ACCT))
@@ -494,6 +500,17 @@ xfs_sb_quota_to_disk(
else if ((*fields & XFS_SB_PQUOTINO) &&
(from->sb_qflags & XFS_PQUOTA_ACCT))
to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
+ else {
+ /*
+ * We can't rely on just the fields being logged to tell us
+ * that it is safe to write NULLFSINO - we should only do that
+ * if quotas are not actually enabled. Hence only write
+ * NULLFSINO if both in-core quota inodes are NULL.
+ */
+ if (from->sb_gquotino == NULLFSINO &&
+ from->sb_pquotino == NULLFSINO)
+ to->sb_gquotino = cpu_to_be64(NULLFSINO);
+ }
*fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
}
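
Decision table implemented by the branch above (derived from the comments; "logged" means the corresponding *fields bit is set):

/*  GQUOTINO logged && GQUOTA_ACCT          -> sb_gquotino = from->sb_gquotino
 *  PQUOTINO logged && PQUOTA_ACCT          -> sb_gquotino = from->sb_pquotino
 *  neither, both in-core inodes NULLFSINO  -> write NULLFSINO
 *  neither, some quota inode still live    -> leave sb_gquotino alone
 */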
diff --git a/include/acpi/video.h b/include/acpi/video.h
index ea4c7bbded4d..843ef1adfbfa 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -22,6 +22,7 @@ extern void acpi_video_unregister(void);
extern void acpi_video_unregister_backlight(void);
extern int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid);
+extern bool acpi_video_verify_backlight_support(void);
#else
static inline int acpi_video_register(void) { return 0; }
static inline void acpi_video_unregister(void) { return; }
@@ -31,6 +32,7 @@ static inline int acpi_video_get_edid(struct acpi_device *device, int type,
{
return -ENODEV;
}
+static inline bool acpi_video_verify_backlight_support(void) { return false; }
#endif
#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 471ba48c7ae4..c1c0b0cf39b4 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -693,7 +693,7 @@
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
. = ALIGN(cacheline); \
- *(.data..percpu..readmostly) \
+ *(.data..percpu..read_mostly) \
. = ALIGN(cacheline); \
*(.data..percpu) \
*(.data..percpu..shared_aligned) \
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index a1344793f4a9..a57646382086 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -151,8 +151,6 @@ int drm_err(const char *func, const char *format, ...);
also include looping detection. */
#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
-#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */
-#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */
#define DRM_MAP_HASH_OFFSET 0x10000000
@@ -346,18 +344,6 @@ struct drm_waitlist {
spinlock_t write_lock;
};
-struct drm_freelist {
- int initialized; /**< Freelist in use */
- atomic_t count; /**< Number of free buffers */
- struct drm_buf *next; /**< End pointer */
-
- wait_queue_head_t waiting; /**< Processes waiting on free bufs */
- int low_mark; /**< Low water mark */
- int high_mark; /**< High water mark */
- atomic_t wfh; /**< If waiting for high mark */
- spinlock_t lock;
-};
-
typedef struct drm_dma_handle {
dma_addr_t busaddr;
void *vaddr;
@@ -375,7 +361,8 @@ struct drm_buf_entry {
int page_order;
struct drm_dma_handle **seglist;
- struct drm_freelist freelist;
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
};
/* Event queued up for userspace to read */
@@ -396,10 +383,7 @@ struct drm_prime_file_private {
/** File private data */
struct drm_file {
- unsigned always_authenticated :1;
unsigned authenticated :1;
- /* Whether we're master for a minor. Protected by master_mutex */
- unsigned is_master :1;
/* true when the client has asked us to expose stereo 3D mode flags */
unsigned stereo_allowed :1;
/*
@@ -441,23 +425,6 @@ struct drm_file {
struct drm_prime_file_private prime;
};
-/** Wait queue */
-struct drm_queue {
- atomic_t use_count; /**< Outstanding uses (+1) */
- atomic_t finalization; /**< Finalization in progress */
- atomic_t block_count; /**< Count of processes waiting */
- atomic_t block_read; /**< Queue blocked for reads */
- wait_queue_head_t read_queue; /**< Processes waiting on block_read */
- atomic_t block_write; /**< Queue blocked for writes */
- wait_queue_head_t write_queue; /**< Processes waiting on block_write */
- atomic_t total_queued; /**< Total queued statistic */
- atomic_t total_flushed; /**< Total flushes statistic */
- atomic_t total_locks; /**< Total locks statistics */
- enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
- struct drm_waitlist waitlist; /**< Pending buffers */
- wait_queue_head_t flush_queue; /**< Processes waiting until flush */
-};
-
/**
* Lock data.
*/
@@ -566,15 +533,6 @@ struct drm_map_list {
struct drm_master *master;
};
-/**
- * Context handle list
- */
-struct drm_ctx_list {
- struct list_head head; /**< list head */
- drm_context_t handle; /**< context handle */
- struct drm_file *tag; /**< associated fd private data */
-};
-
/* location of GART table */
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB 2
@@ -1062,7 +1020,7 @@ struct drm_device {
/** \name Locks */
/*@{ */
struct mutex struct_mutex; /**< For others */
- struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */
+ struct mutex master_mutex; /**< For drm_minor::master */
/*@} */
/** \name Usage Counters */
@@ -1200,6 +1158,21 @@ static inline bool drm_is_primary_client(const struct drm_file *file_priv)
return file_priv->minor->type == DRM_MINOR_LEGACY;
}
+/**
+ * drm_is_master() - Check whether a DRM open-file is DRM-Master
+ * @file: DRM open-file context
+ *
+ * This checks whether a DRM open-file context is owner of the master context
+ * attached to it. If a file owns a master context, it's called DRM-Master.
+ * Per DRM device, only one such file can be DRM-Master at a time.
+ *
+ * Returns: True if the file is DRM-Master, otherwise false.
+ */
+static inline bool drm_is_master(const struct drm_file *file)
+{
+ return file->master && file->master == file->minor->master;
+}
+
/******************************************************************/
/** \name Internal function definitions */
/*@{*/
@@ -1215,7 +1188,6 @@ extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
/* Device support (drm_fops.h) */
extern struct mutex drm_global_mutex;
extern int drm_open(struct inode *inode, struct file *filp);
-extern int drm_stub_open(struct inode *inode, struct file *filp);
extern ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
extern int drm_release(struct inode *inode, struct file *filp);
@@ -1253,29 +1225,6 @@ extern int drm_setversion(struct drm_device *dev, void *data,
extern int drm_noop(struct drm_device *dev, void *data,
struct drm_file *file_priv);
- /* Context IOCTL support (drm_context.h) */
-extern int drm_resctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_addctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_getctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_switchctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_newctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_rmctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-extern int drm_ctxbitmap_init(struct drm_device *dev);
-extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
-extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
-
-extern int drm_setsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_getsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
/* Authentication IOCTL support (drm_auth.h) */
extern int drm_getmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1395,16 +1344,12 @@ extern void drm_master_put(struct drm_master **master);
extern void drm_put_dev(struct drm_device *dev);
extern void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug;
-extern unsigned int drm_universal_planes;
extern unsigned int drm_vblank_offdelay;
extern unsigned int drm_timestamp_precision;
extern unsigned int drm_timestamp_monotonic;
extern struct class *drm_class;
-extern struct dentry *drm_debugfs_root;
-
-extern struct idr drm_minors_idr;
extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
@@ -1522,9 +1467,8 @@ extern int drm_pci_set_unique(struct drm_device *dev,
struct drm_sysfs_class;
extern struct class *drm_sysfs_create(struct module *owner, char *name);
extern void drm_sysfs_destroy(void);
-extern int drm_sysfs_device_add(struct drm_minor *minor);
+extern struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
extern void drm_sysfs_hotplug_event(struct drm_device *dev);
-extern void drm_sysfs_device_remove(struct drm_minor *minor);
extern int drm_sysfs_connector_add(struct drm_connector *connector);
extern void drm_sysfs_connector_remove(struct drm_connector *connector);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 857bbb1551f8..f1105d0da059 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -41,6 +41,7 @@ struct drm_framebuffer;
struct drm_object_properties;
struct drm_file;
struct drm_clip_rect;
+struct device_node;
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@@ -75,6 +76,14 @@ static inline uint64_t I642U64(int64_t val)
return (uint64_t)*((uint64_t *)&val);
}
+/* rotation property bits */
+#define DRM_ROTATE_0 0
+#define DRM_ROTATE_90 1
+#define DRM_ROTATE_180 2
+#define DRM_ROTATE_270 3
+#define DRM_REFLECT_X 4
+#define DRM_REFLECT_Y 5
+
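
These look like bit numbers rather than masks; assumed usage with the drm_mode_create_rotation_property() declared later in this header:

/* e.g. a plane supporting 0/180 degrees plus X reflection would pass
 *	BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_180) | BIT(DRM_REFLECT_X)
 * as supported_rotations. */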
enum drm_connector_force {
DRM_FORCE_UNSPECIFIED,
DRM_FORCE_OFF,
@@ -314,6 +323,7 @@ struct drm_crtc_funcs {
*/
struct drm_crtc {
struct drm_device *dev;
+ struct device_node *port;
struct list_head head;
/**
@@ -331,6 +341,10 @@ struct drm_crtc {
struct drm_plane *primary;
struct drm_plane *cursor;
+ /* position of cursor plane on crtc */
+ int cursor_x;
+ int cursor_y;
+
/* Temporary tracking of the old fb while a modeset is ongoing. Used
* by drm_mode_set_config_internal to implement correct refcounting. */
struct drm_framebuffer *old_fb;
@@ -524,6 +538,8 @@ struct drm_connector {
struct drm_property_blob *edid_blob_ptr;
struct drm_object_properties properties;
+ struct drm_property_blob *path_blob_ptr;
+
uint8_t polled; /* DRM_CONNECTOR_POLL_* */
/* requested DPMS state */
@@ -803,6 +819,7 @@ struct drm_mode_config {
struct list_head property_blob_list;
struct drm_property *edid_property;
struct drm_property *dpms_property;
+ struct drm_property *path_property;
struct drm_property *plane_type_property;
/* DVI-I properties */
@@ -826,6 +843,7 @@ struct drm_mode_config {
/* Optional properties */
struct drm_property *scaling_mode_property;
+ struct drm_property *aspect_ratio_property;
struct drm_property *dirty_info_property;
/* dumb ioctl parameters */
@@ -855,7 +873,7 @@ struct drm_prop_enum_list {
extern int drm_crtc_init_with_planes(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_plane *primary,
- void *cursor,
+ struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs);
extern int drm_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc,
@@ -942,6 +960,7 @@ extern const char *drm_get_tv_select_name(int val);
extern void drm_fb_release(struct drm_file *file_priv);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern void drm_mode_group_destroy(struct drm_mode_group *group);
+extern void drm_reinit_primary_mode_group(struct drm_device *dev);
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
@@ -951,6 +970,8 @@ extern void drm_mode_config_init(struct drm_device *dev);
extern void drm_mode_config_reset(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev);
+extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
+ char *path);
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid);
@@ -999,7 +1020,8 @@ extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
- int num_values);
+ int num_props,
+ uint64_t supported_bits);
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max);
@@ -1015,6 +1037,7 @@ extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
char *formats[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
@@ -1105,6 +1128,10 @@ extern int drm_format_plane_cpp(uint32_t format, int plane);
extern int drm_format_horz_chroma_subsampling(uint32_t format);
extern int drm_format_vert_chroma_subsampling(uint32_t format);
extern const char *drm_get_format_name(uint32_t format);
+extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+ unsigned int supported_rotations);
+extern unsigned int drm_rotation_simplify(unsigned int rotation,
+ unsigned int supported_rotations);
/* Helpers */
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
new file mode 100644
index 000000000000..9b446ada2532
--- /dev/null
+++ b/include/drm/drm_dp_mst_helper.h
@@ -0,0 +1,509 @@
+/*
+ * Copyright © 2014 Red Hat.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+#ifndef _DRM_DP_MST_HELPER_H_
+#define _DRM_DP_MST_HELPER_H_
+
+#include <linux/types.h>
+#include <drm/drm_dp_helper.h>
+
+struct drm_dp_mst_branch;
+
+/**
+ * struct drm_dp_vcpi - Virtual Channel Payload Identifier
+ * @vcpi: Virtual channel ID.
+ * @pbn: Payload Bandwidth Number for this channel
+ * @aligned_pbn: PBN aligned with slot size
+ * @num_slots: number of slots for this PBN
+ */
+struct drm_dp_vcpi {
+ int vcpi;
+ int pbn;
+ int aligned_pbn;
+ int num_slots;
+};
+
+/**
+ * struct drm_dp_mst_port - MST port
+ * @kref: reference count for this port.
+ * @guid_valid: whether @guid has been validated (DP 1.2 devices).
+ * @guid: guid for DP 1.2 device on this port.
+ * @port_num: port number
+ * @input: if this port is an input port.
+ * @mcs: message capability status - DP 1.2 spec.
+ * @ddps: DisplayPort Device Plug Status - DP 1.2
+ * @pdt: Peer Device Type
+ * @ldps: Legacy Device Plug Status
+ * @dpcd_rev: DPCD revision of device on this port
+ * @num_sdp_streams: Number of simultaneous streams
+ * @num_sdp_stream_sinks: Number of stream sinks
+ * @available_pbn: Available bandwidth for this port.
+ * @next: link to next port on this branch device
+ * @mstb: branch device attached below this port
+ * @aux: i2c aux transport to talk to device connected to this port.
+ * @parent: branch device parent of this port
+ * @vcpi: Virtual Channel Payload info for this port.
+ * @connector: DRM connector this port is connected to.
+ * @mgr: topology manager this port lives under.
+ *
+ * This structure represents an MST port endpoint on a device somewhere
+ * in the MST topology.
+ */
+struct drm_dp_mst_port {
+ struct kref kref;
+
+ /* if a DPCD 1.2 device is on this port - its GUID info */
+ bool guid_valid;
+ u8 guid[16];
+
+ u8 port_num;
+ bool input;
+ bool mcs;
+ bool ddps;
+ u8 pdt;
+ bool ldps;
+ u8 dpcd_rev;
+ u8 num_sdp_streams;
+ u8 num_sdp_stream_sinks;
+ uint16_t available_pbn;
+ struct list_head next;
+ struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */
+ struct drm_dp_aux aux; /* i2c bus for this port? */
+ struct drm_dp_mst_branch *parent;
+
+ struct drm_dp_vcpi vcpi;
+ struct drm_connector *connector;
+ struct drm_dp_mst_topology_mgr *mgr;
+};
+
+/**
+ * struct drm_dp_mst_branch - MST branch device.
+ * @kref: reference count for this branch device.
+ * @rad: Relative Address to talk to this branch device.
+ * @lct: Link count total to talk to this branch device.
+ * @num_ports: number of ports on the branch.
+ * @msg_slots: one bit per transmitted msg slot.
+ * @ports: linked list of ports on this branch.
+ * @port_parent: pointer to the port parent, NULL if toplevel.
+ * @mgr: topology manager for this branch device.
+ * @tx_slots: transmission slots for this device.
+ * @last_seqno: last sequence number used to talk to this device.
+ * @link_address_sent: whether a link address message has been sent to this device yet.
+ *
+ * This structure represents an MST branch device; there is one
+ * primary branch device at the root, along with any others connected
+ * to downstream ports.
+ */
+struct drm_dp_mst_branch {
+ struct kref kref;
+ u8 rad[8];
+ u8 lct;
+ int num_ports;
+
+ int msg_slots;
+ struct list_head ports;
+
+ /* list of tx ops queue for this port */
+ struct drm_dp_mst_port *port_parent;
+ struct drm_dp_mst_topology_mgr *mgr;
+
+ /* slots are protected by mstb->mgr->qlock */
+ struct drm_dp_sideband_msg_tx *tx_slots[2];
+ int last_seqno;
+ bool link_address_sent;
+};
+
+
+/* sideband msg header - not bit struct */
+struct drm_dp_sideband_msg_hdr {
+ u8 lct;
+ u8 lcr;
+ u8 rad[8];
+ bool broadcast;
+ bool path_msg;
+ u8 msg_len;
+ bool somt;
+ bool eomt;
+ bool seqno;
+};
+
+struct drm_dp_nak_reply {
+ u8 guid[16];
+ u8 reason;
+ u8 nak_data;
+};
+
+struct drm_dp_link_address_ack_reply {
+ u8 guid[16];
+ u8 nports;
+ struct drm_dp_link_addr_reply_port {
+ bool input_port;
+ u8 peer_device_type;
+ u8 port_number;
+ bool mcs;
+ bool ddps;
+ bool legacy_device_plug_status;
+ u8 dpcd_revision;
+ u8 peer_guid[16];
+ u8 num_sdp_streams;
+ u8 num_sdp_stream_sinks;
+ } ports[16];
+};
+
+struct drm_dp_remote_dpcd_read_ack_reply {
+ u8 port_number;
+ u8 num_bytes;
+ u8 bytes[255];
+};
+
+struct drm_dp_remote_dpcd_write_ack_reply {
+ u8 port_number;
+};
+
+struct drm_dp_remote_dpcd_write_nak_reply {
+ u8 port_number;
+ u8 reason;
+ u8 bytes_written_before_failure;
+};
+
+struct drm_dp_remote_i2c_read_ack_reply {
+ u8 port_number;
+ u8 num_bytes;
+ u8 bytes[255];
+};
+
+struct drm_dp_remote_i2c_read_nak_reply {
+ u8 port_number;
+ u8 nak_reason;
+ u8 i2c_nak_transaction;
+};
+
+struct drm_dp_remote_i2c_write_ack_reply {
+ u8 port_number;
+};
+
+
+struct drm_dp_sideband_msg_rx {
+ u8 chunk[48];
+ u8 msg[256];
+ u8 curchunk_len;
+ u8 curchunk_idx; /* chunk we are parsing now */
+ u8 curchunk_hdrlen;
+ u8 curlen; /* total length of the msg */
+ bool have_somt;
+ bool have_eomt;
+ struct drm_dp_sideband_msg_hdr initial_hdr;
+};
+
+
+struct drm_dp_allocate_payload {
+ u8 port_number;
+ u8 number_sdp_streams;
+ u8 vcpi;
+ u16 pbn;
+ u8 sdp_stream_sink[8];
+};
+
+struct drm_dp_allocate_payload_ack_reply {
+ u8 port_number;
+ u8 vcpi;
+ u16 allocated_pbn;
+};
+
+struct drm_dp_connection_status_notify {
+ u8 guid[16];
+ u8 port_number;
+ bool legacy_device_plug_status;
+ bool displayport_device_plug_status;
+ bool message_capability_status;
+ bool input_port;
+ u8 peer_device_type;
+};
+
+struct drm_dp_remote_dpcd_read {
+ u8 port_number;
+ u32 dpcd_address;
+ u8 num_bytes;
+};
+
+struct drm_dp_remote_dpcd_write {
+ u8 port_number;
+ u32 dpcd_address;
+ u8 num_bytes;
+ u8 *bytes;
+};
+
+struct drm_dp_remote_i2c_read {
+ u8 num_transactions;
+ u8 port_number;
+ struct {
+ u8 i2c_dev_id;
+ u8 num_bytes;
+ u8 *bytes;
+ u8 no_stop_bit;
+ u8 i2c_transaction_delay;
+ } transactions[4];
+ u8 read_i2c_device_id;
+ u8 num_bytes_read;
+};
+
+struct drm_dp_remote_i2c_write {
+ u8 port_number;
+ u8 write_i2c_device_id;
+ u8 num_bytes;
+ u8 *bytes;
+};
+
+/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
+struct drm_dp_port_number_req {
+ u8 port_number;
+};
+
+struct drm_dp_enum_path_resources_ack_reply {
+ u8 port_number;
+ u16 full_payload_bw_number;
+ u16 avail_payload_bw_number;
+};
+
+/* covers POWER_DOWN_PHY, POWER_UP_PHY */
+struct drm_dp_port_number_rep {
+ u8 port_number;
+};
+
+struct drm_dp_query_payload {
+ u8 port_number;
+ u8 vcpi;
+};
+
+struct drm_dp_resource_status_notify {
+ u8 port_number;
+ u8 guid[16];
+ u16 available_pbn;
+};
+
+struct drm_dp_query_payload_ack_reply {
+ u8 port_number;
+ u8 allocated_pbn;
+};
+
+struct drm_dp_sideband_msg_req_body {
+ u8 req_type;
+ union ack_req {
+ struct drm_dp_connection_status_notify conn_stat;
+ struct drm_dp_port_number_req port_num;
+ struct drm_dp_resource_status_notify resource_stat;
+
+ struct drm_dp_query_payload query_payload;
+ struct drm_dp_allocate_payload allocate_payload;
+
+ struct drm_dp_remote_dpcd_read dpcd_read;
+ struct drm_dp_remote_dpcd_write dpcd_write;
+
+ struct drm_dp_remote_i2c_read i2c_read;
+ struct drm_dp_remote_i2c_write i2c_write;
+ } u;
+};
+
+struct drm_dp_sideband_msg_reply_body {
+ u8 reply_type;
+ u8 req_type;
+ union ack_replies {
+ struct drm_dp_nak_reply nak;
+ struct drm_dp_link_address_ack_reply link_addr;
+ struct drm_dp_port_number_rep port_number;
+
+ struct drm_dp_enum_path_resources_ack_reply path_resources;
+ struct drm_dp_allocate_payload_ack_reply allocate_payload;
+ struct drm_dp_query_payload_ack_reply query_payload;
+
+ struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
+ struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
+ struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;
+
+ struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
+ struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
+ struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
+ } u;
+};
+
+/* msg is queued to be put into a slot */
+#define DRM_DP_SIDEBAND_TX_QUEUED 0
+/* msg has started transmitting on a slot - still on msgq */
+#define DRM_DP_SIDEBAND_TX_START_SEND 1
+/* msg has finished transmitting on a slot - removed from msgq, tracked only in the slot */
+#define DRM_DP_SIDEBAND_TX_SENT 2
+/* msg has received a response - removed from slot */
+#define DRM_DP_SIDEBAND_TX_RX 3
+#define DRM_DP_SIDEBAND_TX_TIMEOUT 4
+
+struct drm_dp_sideband_msg_tx {
+ u8 msg[256];
+ u8 chunk[48];
+ u8 cur_offset;
+ u8 cur_len;
+ struct drm_dp_mst_branch *dst;
+ struct list_head next;
+ int seqno;
+ int state;
+ bool path_msg;
+ struct drm_dp_sideband_msg_reply_body reply;
+};
+
+/* sideband msg handler */
+struct drm_dp_mst_topology_mgr;
+struct drm_dp_mst_topology_cbs {
+ /* create a connector for a port */
+ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path);
+ void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector);
+ void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
+
+};
+
+#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
+
+#define DP_PAYLOAD_LOCAL 1
+#define DP_PAYLOAD_REMOTE 2
+#define DP_PAYLOAD_DELETE_LOCAL 3
+
+struct drm_dp_payload {
+ int payload_state;
+ int start_slot;
+ int num_slots;
+};
+
+/**
+ * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
+ * @dev: device pointer for adding i2c devices etc.
+ * @cbs: callbacks for connector addition and destruction.
+ * @max_dpcd_transaction_bytes: maximum number of bytes to read/write in one go.
+ * @aux: aux channel for the DP connector.
+ * @max_payloads: maximum number of payloads the GPU can generate.
+ * @conn_base_id: DRM connector ID this mgr is connected to.
+ * @down_rep_recv: msg receiver state for down replies.
+ * @up_req_recv: msg receiver state for up requests.
+ * @lock: protects mst state, primary, guid, dpcd.
+ * @mst_state: if this manager is enabled for an MST capable port.
+ * @mst_primary: pointer to the primary branch device.
+ * @guid_valid: GUID valid for the primary branch device.
+ * @guid: GUID for primary port.
+ * @dpcd: cache of DPCD for primary port.
+ * @pbn_div: PBN to slots divisor.
+ *
+ * This struct represents the toplevel displayport MST topology manager.
+ * There should be one instance of this for every MST capable DP connector
+ * on the GPU.
+ */
+struct drm_dp_mst_topology_mgr {
+
+ struct device *dev;
+ struct drm_dp_mst_topology_cbs *cbs;
+ int max_dpcd_transaction_bytes;
+ struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
+ int max_payloads;
+ int conn_base_id;
+
+ /* only ever accessed from the workqueue - which should be serialised */
+ struct drm_dp_sideband_msg_rx down_rep_recv;
+ struct drm_dp_sideband_msg_rx up_req_recv;
+
+ /* pointer to info about the initial MST device */
+ struct mutex lock; /* protects mst_state + primary + guid + dpcd */
+
+ bool mst_state;
+ struct drm_dp_mst_branch *mst_primary;
+ /* primary MST device GUID */
+ bool guid_valid;
+ u8 guid[16];
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 sink_count;
+ int pbn_div;
+ int total_slots;
+ int avail_slots;
+ int total_pbn;
+
+ /* messages to be transmitted */
+ /* qlock protects the upq/downq and in_progress,
+ the mstb tx_slots and txmsg->state once they are queued */
+ struct mutex qlock;
+ struct list_head tx_msg_downq;
+ struct list_head tx_msg_upq;
+ bool tx_down_in_progress;
+ bool tx_up_in_progress;
+
+ /* payload info + lock for it */
+ struct mutex payload_lock;
+ struct drm_dp_vcpi **proposed_vcpis;
+ struct drm_dp_payload *payloads;
+ unsigned long payload_mask;
+
+ wait_queue_head_t tx_waitq;
+ struct work_struct work;
+
+ struct work_struct tx_work;
+};
+
+int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
+
+void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
+
+
+int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
+
+
+int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
+
+
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
+struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
+
+int drm_dp_calc_pbn_mode(int clock, int bpp);
+
+
+bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
+
+
+void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
+
+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
+
+
+int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
+ int pbn);
+
+
+int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
+
+
+int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
+
+int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
+
+void drm_dp_mst_dump_topology(struct seq_file *m,
+ struct drm_dp_mst_topology_mgr *mgr);
+
+void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
+#endif
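
A rough driver-side call order for this API (a sketch assembled from the declarations above, not taken from a real driver):

/* probe:    drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 4, conn_id);
 * detect:   drm_dp_mst_topology_mgr_set_mst(mgr, true);
 * irq:      drm_dp_mst_hpd_irq(mgr, esi, &handled);
 * modeset:  pbn = drm_dp_calc_pbn_mode(clock_khz, bpp);
 *           drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots);
 *           drm_dp_update_payload_part1(mgr);
 *           ...enable the stream...
 *           drm_dp_check_act_status(mgr);
 *           drm_dp_update_payload_part2(mgr);
 * teardown: drm_dp_mst_deallocate_vcpi(mgr, port);
 *           drm_dp_mst_topology_mgr_set_mst(mgr, false);
 *           drm_dp_mst_topology_mgr_destroy(mgr);
 */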
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 1cf587f1f927..bfd329d613c4 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -86,6 +86,7 @@ struct drm_fb_helper {
int crtc_count;
struct drm_fb_helper_crtc *crtc_info;
int connector_count;
+ int connector_info_alloc_count;
struct drm_fb_helper_connector **connector_info;
const struct drm_fb_helper_funcs *funcs;
struct fb_info *fbdev;
@@ -130,4 +131,7 @@ struct drm_display_mode *
drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
int width, int height);
+int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
+int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector);
#endif
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index efa1b552adc5..2bb55b8b9031 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -123,14 +123,17 @@ struct mipi_dsi_device {
unsigned long mode_flags;
};
-#define to_mipi_dsi_device(d) container_of(d, struct mipi_dsi_device, dev)
+static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
+{
+ return container_of(dev, struct mipi_dsi_device, dev);
+}
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
-int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel,
- const void *data, size_t len);
-ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
- u8 cmd, void *data, size_t len);
+ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
+ size_t len);
+ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
+ size_t len);
/**
* struct mipi_dsi_driver - DSI driver
@@ -146,7 +149,11 @@ struct mipi_dsi_driver {
void (*shutdown)(struct mipi_dsi_device *dsi);
};
-#define to_mipi_dsi_driver(d) container_of(d, struct mipi_dsi_driver, driver)
+static inline struct mipi_dsi_driver *
+to_mipi_dsi_driver(struct device_driver *driver)
+{
+ return container_of(driver, struct mipi_dsi_driver, driver);
+}
static inline void *mipi_dsi_get_drvdata(const struct mipi_dsi_device *dsi)
{
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
new file mode 100644
index 000000000000..2441f7112074
--- /dev/null
+++ b/include/drm/drm_of.h
@@ -0,0 +1,18 @@
+#ifndef __DRM_OF_H__
+#define __DRM_OF_H__
+
+struct drm_device;
+struct device_node;
+
+#ifdef CONFIG_OF
+extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
+ struct device_node *port);
+#else
+static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
+ struct device_node *port)
+{
+ return 0;
+}
+#endif
+
+#endif /* __DRM_OF_H__ */
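
Presumed use: translate an encoder's OF port back into a CRTC bitmask when wiring it up (encoder, drm and port_np are illustrative, not part of this header):

encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, port_np);
if (!encoder->possible_crtcs)
	encoder->possible_crtcs = 1;	/* assumed fallback: first CRTC */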
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index c2ab77add67c..1fbcc96063a7 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -30,8 +30,42 @@ struct drm_connector;
struct drm_device;
struct drm_panel;
+/**
+ * struct drm_panel_funcs - perform operations on a given panel
+ * @disable: disable panel (turn off backlight, etc.)
+ * @unprepare: turn off panel
+ * @prepare: turn on panel and perform set up
+ * @enable: enable panel (turn on backlight, etc.)
+ * @get_modes: add modes to the connector that the panel is attached to and
+ * return the number of modes added
+ *
+ * The .prepare() function is typically called before the display controller
+ * starts to transmit video data. Panel drivers can use this to turn the panel
+ * on and wait for it to become ready. If additional configuration is required
+ * (via a control bus such as I2C, SPI or DSI for example) this is a good time
+ * to do that.
+ *
+ * After the display controller has started transmitting video data, it's safe
+ * to call the .enable() function. This will typically enable the backlight to
+ * make the image on screen visible. Some panels require a certain amount of
+ * time or frames before the image is displayed. This function is responsible
+ * for taking this into account before enabling the backlight to avoid visual
+ * glitches.
+ *
+ * Before stopping video transmission from the display controller it can be
+ * necessary to turn off the panel to avoid visual glitches. This is done in
+ * the .disable() function. Analogously to .enable() this typically involves
+ * turning off the backlight and waiting for some time to make sure no image
+ * is visible on the panel. It is then safe for the display controller to
+ * cease transmission of video data.
+ *
+ * To save power when no video data is transmitted, a driver can power down
+ * the panel. This is the job of the .unprepare() function.
+ */
struct drm_panel_funcs {
int (*disable)(struct drm_panel *panel);
+ int (*unprepare)(struct drm_panel *panel);
+ int (*prepare)(struct drm_panel *panel);
int (*enable)(struct drm_panel *panel);
int (*get_modes)(struct drm_panel *panel);
};
@@ -46,6 +80,14 @@ struct drm_panel {
struct list_head list;
};
+static inline int drm_panel_unprepare(struct drm_panel *panel)
+{
+ if (panel && panel->funcs && panel->funcs->unprepare)
+ return panel->funcs->unprepare(panel);
+
+ return panel ? -ENOSYS : -EINVAL;
+}
+
static inline int drm_panel_disable(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->disable)
@@ -54,6 +96,14 @@ static inline int drm_panel_disable(struct drm_panel *panel)
return panel ? -ENOSYS : -EINVAL;
}
+static inline int drm_panel_prepare(struct drm_panel *panel)
+{
+ if (panel && panel->funcs && panel->funcs->prepare)
+ return panel->funcs->prepare(panel);
+
+ return panel ? -ENOSYS : -EINVAL;
+}
+
static inline int drm_panel_enable(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->enable)
@@ -62,6 +112,14 @@ static inline int drm_panel_enable(struct drm_panel *panel)
return panel ? -ENOSYS : -EINVAL;
}
+static inline int drm_panel_get_modes(struct drm_panel *panel)
+{
+ if (panel && panel->funcs && panel->funcs->get_modes)
+ return panel->funcs->get_modes(panel);
+
+ return panel ? -ENOSYS : -EINVAL;
+}
+
void drm_panel_init(struct drm_panel *panel);
int drm_panel_add(struct drm_panel *panel);
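
The ordering documented above, condensed into the sequence a display controller driver would issue (sketch):

drm_panel_prepare(panel);	/* power up, before video is transmitted */
/* ...controller starts scanout... */
drm_panel_enable(panel);	/* backlight on, image becomes visible */
/* ...and on teardown, in reverse... */
drm_panel_disable(panel);	/* backlight off */
/* ...controller stops scanout... */
drm_panel_unprepare(panel);	/* power down */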
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index d1286297567b..26bb55e9e8b6 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -163,5 +163,11 @@ int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_vscale, int max_vscale);
void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
+void drm_rect_rotate(struct drm_rect *r,
+ int width, int height,
+ unsigned int rotation);
+void drm_rect_rotate_inv(struct drm_rect *r,
+ int width, int height,
+ unsigned int rotation);
#endif
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 97dcb89d37d3..21d51ae1d242 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -63,7 +63,6 @@
#define CLK_SCLK_MPHY_IXTAL24 161
/* gate clocks */
-#define CLK_ACLK66_PERIC 256
#define CLK_UART0 257
#define CLK_UART1 258
#define CLK_UART2 259
@@ -203,6 +202,8 @@
#define CLK_MOUT_G3D 641
#define CLK_MOUT_VPLL 642
#define CLK_MOUT_MAUDIO0 643
+#define CLK_MOUT_USER_ACLK333 644
+#define CLK_MOUT_SW_ACLK333 645
/* divider clocks */
#define CLK_DOUT_PIXEL 768
diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
index 002a2855c046..3d33794e4f3e 100644
--- a/include/dt-bindings/pinctrl/dra.h
+++ b/include/dt-bindings/pinctrl/dra.h
@@ -30,7 +30,8 @@
#define MUX_MODE14 0xe
#define MUX_MODE15 0xf
-#define PULL_ENA (1 << 16)
+#define PULL_ENA (0 << 16)
+#define PULL_DIS (1 << 16)
#define PULL_UP (1 << 17)
#define INPUT_EN (1 << 18)
#define SLEWCONTROL (1 << 19)
@@ -38,10 +39,10 @@
#define WAKEUP_EVENT (1 << 25)
/* Active pin states */
-#define PIN_OUTPUT 0
+#define PIN_OUTPUT (0 | PULL_DIS)
#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP)
#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA)
-#define PIN_INPUT INPUT_EN
+#define PIN_INPUT (INPUT_EN | PULL_DIS)
#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL)
#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP)
#define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN)
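
Expanding the composites shows the behavioural change, assuming bit 16 is an active-high pull-disable:

/* PIN_INPUT        = INPUT_EN | PULL_DIS
 *                  -> bit 16 set: internal pulls explicitly off
 * PIN_INPUT_PULLUP = PULL_ENA | INPUT_EN | PULL_UP
 *                  = INPUT_EN | PULL_UP (PULL_ENA is now 0)
 *                  -> bit 16 clear: pull-up active
 * Previously PIN_INPUT left bit 16 clear, which apparently left the
 * pull enabled on this hardware. */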
diff --git a/include/linux/component.h b/include/linux/component.h
index 68870182ca1e..c00dcc302611 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -29,4 +29,11 @@ void component_master_del(struct device *,
int component_master_add_child(struct master *master,
int (*compare)(struct device *, void *), void *compare_data);
+struct component_match;
+
+int component_master_add_with_match(struct device *,
+ const struct component_master_ops *, struct component_match *);
+void component_match_add(struct device *, struct component_match **,
+ int (*compare)(struct device *, void *), void *compare_data);
+
#endif
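
Assumed usage shape for the new match API (sketch; my_master_ops, compare_of and child_np are illustrative):

static int my_master_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;

	/* queue one match entry per component we expect to bind */
	component_match_add(&pdev->dev, &match, compare_of, child_np);

	/* bind the master once every entry has found its component */
	return component_master_add_with_match(&pdev->dev,
					       &my_master_ops, match);
}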
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ec4112d257bc..8f8ae95c6e27 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -482,8 +482,8 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
*********************************************************************/
/* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID ~0
-#define CPUFREQ_TABLE_END ~1
+#define CPUFREQ_ENTRY_INVALID ~0u
+#define CPUFREQ_TABLE_END ~1u
/* Special Values of .flags field */
#define CPUFREQ_BOOST_FREQ (1 << 0)
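
A note on the arithmetic: ~0 is a signed int (-1), so comparisons against an unsigned int .frequency relied on implicit conversion and could trip sign-compare warnings; the u suffix makes the sentinels unambiguously unsigned:

/* ~0  : int, value -1 (sign-compare hazard against unsigned .frequency)
 * ~0u : unsigned int, UINT_MAX
 * ~1u : unsigned int, UINT_MAX - 1 */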
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 255cd5cc0754..a23c096b3080 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -80,6 +80,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);
+void free_huge_page(struct page *page);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 145375ea0bd9..30faf797c2c3 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -305,6 +305,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
struct kernfs_root *root, unsigned long magic,
bool *new_sb_created, const void *ns);
void kernfs_kill_sb(struct super_block *sb);
+struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns);
void kernfs_init(void);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5ab4e3a76721..92abb497ab14 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -593,6 +593,7 @@ struct ata_host {
struct device *dev;
void __iomem * const *iomap;
unsigned int n_ports;
+ unsigned int n_tags; /* nr of NCQ tags */
void *private_data;
struct ata_port_operations *ops;
unsigned long flags;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b12f4bbd064c..35b51e7af886 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -578,8 +578,6 @@ struct mlx4_cq {
u32 cons_index;
u16 irq;
- bool irq_affinity_change;
-
__be32 *set_ci_db;
__be32 *arm_db;
int arm_sn;
@@ -1167,6 +1165,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
+
int mlx4_get_phys_port_id(struct mlx4_dev *dev);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 11692dea18aa..42aa9b9ecd5f 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -17,6 +17,7 @@
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <asm/processor.h>
+#include <linux/osq_lock.h>
/*
* Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
* - detects multi-task circular deadlocks and prints out all affected
* locks and tasks (and only those tasks)
*/
-struct optimistic_spin_queue;
struct mutex {
/* 1: unlocked, 0: locked, negative: locked, possible waiters */
atomic_t count;
@@ -56,7 +56,7 @@ struct mutex {
struct task_struct *owner;
#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- struct optimistic_spin_queue *osq; /* Spinner MCS lock */
+ struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
#ifdef CONFIG_DEBUG_MUTEXES
const char *name;
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 05117899fcb4..0ff360d5b3b3 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -73,6 +73,8 @@ extern int early_init_dt_scan_root(unsigned long node, const char *uname,
int depth, void *data);
extern bool early_init_dt_scan(void *params);
+extern bool early_init_dt_verify(void *params);
+extern void early_init_dt_scan_nodes(void);
extern const char *of_flat_dt_get_machine_name(void);
extern const void *of_flat_dt_match_machine(const void *default_match,
@@ -84,6 +86,7 @@ extern void unflatten_and_copy_device_tree(void);
extern void early_init_devtree(void *);
extern void early_get_first_memblock_info(void *, phys_addr_t *);
extern u64 fdt_translate_address(const void *blob, int node_offset);
+extern void of_fdt_limit_memory(int limit);
#else /* CONFIG_OF_FLATTREE */
static inline void early_init_fdt_scan_reserved_mem(void) {}
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index a70c9493d55a..d449018d0726 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -25,9 +25,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
-extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
- struct phy_device *phydev);
-
#else /* CONFIG_OF */
static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
@@ -63,11 +60,6 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
{
return NULL;
}
-
-static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
- struct phy_device *phydev)
-{
-}
#endif /* CONFIG_OF */
#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644
index 000000000000..90230d5811c5
--- /dev/null
+++ b/include/linux/osq_lock.h
@@ -0,0 +1,27 @@
+#ifndef __LINUX_OSQ_LOCK_H
+#define __LINUX_OSQ_LOCK_H
+
+/*
+ * An MCS like lock especially tailored for optimistic spinning for sleeping
+ * lock implementations (mutex, rwsem, etc).
+ */
+
+#define OSQ_UNLOCKED_VAL (0)
+
+struct optimistic_spin_queue {
+ /*
+ * Stores an encoded value of the CPU # of the tail node in the queue.
+ * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
+ */
+ atomic_t tail;
+};
+
+/* Init macro and function. */
+#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
+
+static inline void osq_lock_init(struct optimistic_spin_queue *lock)
+{
+ atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
+}
+
+#endif
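
With the queue embedded by value, a sleeping-lock type simply contains one and initializes it in place, mirroring the mutex/rwsem changes elsewhere in this patch (sketch, illustrative type):

#include <linux/atomic.h>
#include <linux/osq_lock.h>

struct my_sleeping_lock {
	atomic_t count;
	struct optimistic_spin_queue osq;	/* embedded, no allocation */
};

static inline void my_sleeping_lock_init(struct my_sleeping_lock *l)
{
	atomic_set(&l->count, 1);
	osq_lock_init(&l->osq);
}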
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0a97b583ee8d..e1474ae18c88 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -399,6 +399,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
}
/*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+ if (unlikely(PageHeadHuge(page)))
+ return page->index << compound_order(page);
+ else
+ return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+}
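
A worked case, assuming 4 KiB base pages and 2 MiB hugepages:

/* compound_order(2M page) == 9, so hugepage index 3 becomes
 * 3 << 9 = 1536 base-page units, i.e. byte offset 1536 * 4K = 6M.
 * For normal pages PAGE_CACHE_SHIFT == PAGE_SHIFT and the shift is 0. */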
+
+/*
* Return byte-offset into filesystem object for page.
*/
static inline loff_t page_offset(struct page *page)
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index a5fc7d01aad6..dec01d6c3f80 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -146,10 +146,10 @@
* Declaration/definition used for per-CPU variables that must be read mostly.
*/
#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \
- DECLARE_PER_CPU_SECTION(type, name, "..readmostly")
+ DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
- DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
+ DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
/*
* Intermodule exports for per-CPU variables. sparse forgets about
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5a75d19aa661..6a94cc8b1ca0 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,7 +44,6 @@
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
-#include <linux/percpu.h>
#include <asm/barrier.h>
extern int rcu_expedited; /* for sysctl */
@@ -300,41 +299,6 @@ bool __rcu_is_watching(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
/*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-#define RCU_COND_RESCHED_LIM 256 /* ms vs. 100s of ms. */
-DECLARE_PER_CPU(int, rcu_cond_resched_count);
-void rcu_resched(void);
-
-/*
- * Is it time to report RCU quiescent states?
- *
- * Note unsynchronized access to rcu_cond_resched_count. Yes, we might
- * increment some random CPU's count, and possibly also load the result from
- * yet another CPU's count. We might even clobber some other CPU's attempt
- * to zero its counter. This is all OK because the goal is not precision,
- * but rather reasonable amortization of rcu_note_context_switch() overhead
- * and extremely high probability of avoiding RCU CPU stall warnings.
- * Note that this function has to be preempted in just the wrong place,
- * many thousands of times in a row, for anything bad to happen.
- */
-static inline bool rcu_should_resched(void)
-{
- return raw_cpu_inc_return(rcu_cond_resched_count) >=
- RCU_COND_RESCHED_LIM;
-}
-
-/*
- * Report quiscent states to RCU if it is time to do so.
- */
-static inline void rcu_cond_resched(void)
-{
- if (unlikely(rcu_should_resched()))
- rcu_resched();
-}
-
-/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
* initialization.
*/
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+void init_rcu_head(struct rcu_head *head);
+void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void init_rcu_head(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head(struct rcu_head *head)
+{
+}
+
static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index d5b13bc07a0b..561e8615528d 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -15,13 +15,13 @@
#ifdef __KERNEL__
/*
* the rw-semaphore definition
- * - if activity is 0 then there are no active readers or writers
- * - if activity is +ve then that is the number of active readers
- * - if activity is -1 then there is one active writer
+ * - if count is 0 then there are no active readers or writers
+ * - if count is +ve then that is the number of active readers
+ * - if count is -1 then there is one active writer
* - if wait_list is not empty, then there are processes waiting for the semaphore
*/
struct rw_semaphore {
- __s32 activity;
+ __s32 count;
raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8d79708146aa..035d3c57fc8a 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -13,10 +13,11 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-
#include <linux/atomic.h>
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#include <linux/osq_lock.h>
+#endif
-struct optimistic_spin_queue;
struct rw_semaphore;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,15 +26,15 @@ struct rw_semaphore;
/* All arch specific implementations share the same struct */
struct rw_semaphore {
long count;
- raw_spinlock_t wait_lock;
struct list_head wait_list;
-#ifdef CONFIG_SMP
+ raw_spinlock_t wait_lock;
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ struct optimistic_spin_queue osq; /* spinner MCS lock */
/*
* Write owner. Used as a speculative check to see
* if the owner is running on the cpu.
*/
struct task_struct *owner;
- struct optimistic_spin_queue *osq; /* spinner MCS lock */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -64,22 +65,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
-#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, \
- __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
- LIST_HEAD_INIT((name).wait_list), \
- NULL, /* owner */ \
- NULL /* mcs lock */ \
- __RWSEM_DEP_MAP_INIT(name) }
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
#else
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, \
- __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
- LIST_HEAD_INIT((name).wait_list) \
- __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_OPT_INIT(lockname)
#endif
+#define __RWSEM_INITIALIZER(name) \
+ { .count = RWSEM_UNLOCKED_VALUE, \
+ .wait_list = LIST_HEAD_INIT((name).wait_list), \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
+ __RWSEM_OPT_INIT(name) \
+ __RWSEM_DEP_MAP_INIT(name) }
+
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 306f4f0c987a..0376b054a0d0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -872,21 +872,21 @@ enum cpu_idle_type {
#define SD_NUMA 0x4000 /* cross-node balancing */
#ifdef CONFIG_SCHED_SMT
-static inline const int cpu_smt_flags(void)
+static inline int cpu_smt_flags(void)
{
return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif
#ifdef CONFIG_SCHED_MC
-static inline const int cpu_core_flags(void)
+static inline int cpu_core_flags(void)
{
return SD_SHARE_PKG_RESOURCES;
}
#endif
#ifdef CONFIG_NUMA
-static inline const int cpu_numa_flags(void)
+static inline int cpu_numa_flags(void)
{
return SD_NUMA;
}
@@ -999,7 +999,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
bool cpus_share_cache(int this_cpu, int that_cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
-typedef const int (*sched_domain_flags_f)(void);
+typedef int (*sched_domain_flags_f)(void);
#define SDTL_OVERLAP 0x01
diff --git a/include/net/ip.h b/include/net/ip.h
index 0e795df05ec9..7596eb22e1ce 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -309,16 +309,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
}
}
-#define IP_IDENTS_SZ 2048u
-extern atomic_t *ip_idents;
-
-static inline u32 ip_idents_reserve(u32 hash, int segs)
-{
- atomic_t *id_ptr = ip_idents + hash % IP_IDENTS_SZ;
-
- return atomic_add_return(segs, id_ptr) - segs;
-}
-
+u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct iphdr *iph, int segs);
static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
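The now out-of-line ip_idents_reserve() relies on a single fetch-and-add to hand out a contiguous block of IP IDs: adding segs and returning the pre-add value yields the first ID of the block. A small, purely illustrative sketch of that reservation pattern:

/* Block reservation via fetch-and-add; names are stand-ins. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint ident;

static unsigned int reserve_idents(int segs)
{
	/* the old value is the first identifier of the new block */
	return atomic_fetch_add(&ident, segs);
}

int main(void)
{
	unsigned int first = reserve_idents(4);	/* reserves first..first+3 */
	unsigned int next = reserve_idents(1);

	printf("block starts at %u, next ID is %u\n", first, next);
	return 0;
}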
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 7277caf3743d..47f425464f84 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -203,7 +203,6 @@ struct neigh_table {
void (*proxy_redo)(struct sk_buff *skb);
char *id;
struct neigh_parms parms;
- /* HACK. gc_* should follow parms without a gap! */
int gc_interval;
int gc_thresh1;
int gc_thresh2;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 713b0b88bd5a..c4d86198d3d6 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -6,6 +6,7 @@
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/nf_tables.h>
+#include <linux/u64_stats_sync.h>
#include <net/netlink.h>
#define NFT_JUMP_STACK_SIZE 16
@@ -528,8 +529,9 @@ enum nft_chain_type {
};
struct nft_stats {
- u64 bytes;
- u64 pkts;
+ u64 bytes;
+ u64 pkts;
+ struct u64_stats_sync syncp;
};
#define NFT_HOOK_OPS_MAX 2
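Adding a u64_stats_sync to nft_stats lets 32-bit readers see a consistent bytes/pkts pair without taking a lock. Below is a simplified userspace model of the seqcount idea it is built on; the memory ordering is deliberately simplified for illustration and the names are not the kernel API.

/* Hand-rolled seqcount model: the writer bumps the sequence around the
 * update, readers retry until they see an even, unchanged value. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
	uint64_t bytes;
	uint64_t pkts;
	atomic_uint seq;	/* odd while an update is in flight */
};

static void stats_add(struct stats *s, uint64_t b)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
	s->bytes += b;
	s->pkts += 1;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

static void stats_read(struct stats *s, uint64_t *b, uint64_t *p)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*b = s->bytes;
		*p = s->pkts;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
	struct stats s = { 0, 0, 0 };
	uint64_t b, p;

	stats_add(&s, 1500);
	stats_read(&s, &b, &p);
	printf("%llu bytes, %llu pkts\n",
	       (unsigned long long)b, (unsigned long long)p);
	return 0;
}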
diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h
index 079030c853d8..e2070960bac0 100644
--- a/include/net/netns/ieee802154_6lowpan.h
+++ b/include/net/netns/ieee802154_6lowpan.h
@@ -16,7 +16,7 @@ struct netns_sysctl_lowpan {
struct netns_ieee802154_lowpan {
struct netns_sysctl_lowpan sysctl;
struct netns_frags frags;
- u16 max_dsize;
+ int max_dsize;
};
#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 26a394cb91a8..eee608b12cc9 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -13,8 +13,8 @@ struct netns_nftables {
struct nft_af_info *inet;
struct nft_af_info *arp;
struct nft_af_info *bridge;
+ unsigned int base_seq;
u8 gencursor;
- u8 genctr;
};
#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 173cae485de1..156350745700 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1768,9 +1768,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
- spin_lock(&sk->sk_dst_lock);
- __sk_dst_set(sk, dst);
- spin_unlock(&sk->sk_dst_lock);
+ struct dst_entry *old_dst;
+
+ sk_tx_queue_clear(sk);
+ old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ dst_release(old_dst);
}
static inline void
@@ -1782,9 +1784,7 @@ __sk_dst_reset(struct sock *sk)
static inline void
sk_dst_reset(struct sock *sk)
{
- spin_lock(&sk->sk_dst_lock);
- __sk_dst_reset(sk);
- spin_unlock(&sk->sk_dst_lock);
+ sk_dst_set(sk, NULL);
}
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
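The rewritten sk_dst_set() replaces a spinlock with a single atomic exchange of the cached dst pointer: the new entry is published and the old one handed back in one step, and only the old entry gets released. A hedged, single-threaded sketch of that pattern with toy types (not the socket API):

/* Lockless pointer replacement via atomic exchange; illustrative only. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dst { int refcnt; };

static void dst_release_model(struct dst *d)
{
	if (d && --d->refcnt == 0)	/* single-threaded demo refcount */
		free(d);
}

static _Atomic(struct dst *) dst_cache;

static void dst_set_model(struct dst *dst)
{
	/* swap in the new entry, get the old one back atomically */
	struct dst *old = atomic_exchange(&dst_cache, dst);

	dst_release_model(old);
}

int main(void)
{
	struct dst *a = malloc(sizeof(*a));

	a->refcnt = 1;
	dst_set_model(a);	/* install */
	dst_set_model(NULL);	/* reset is just "set to NULL"; frees a */
	printf("cache is %p\n", (void *)atomic_load(&dst_cache));
	return 0;
}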
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index def54f9e07ca..a0db2d4aa5f0 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -88,6 +88,11 @@
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
+/* Picture aspect ratio options */
+#define DRM_MODE_PICTURE_ASPECT_NONE 0
+#define DRM_MODE_PICTURE_ASPECT_4_3 1
+#define DRM_MODE_PICTURE_ASPECT_16_9 2
+
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 1cc0b610f162..509b2d7a41b7 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -796,7 +796,9 @@ struct drm_radeon_gem_info {
uint64_t vram_visible;
};
-#define RADEON_GEM_NO_BACKING_STORE 1
+#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
+#define RADEON_GEM_GTT_UC (1 << 1)
+#define RADEON_GEM_GTT_WC (1 << 2)
struct drm_radeon_gem_create {
uint64_t size;
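Redefining the radeon GEM flags above in (1 << n) form makes each flag a distinct bit that can be combined and tested independently as new flags are added. A trivial illustration with stand-in names:

/* Distinct-bit flags can be OR'd together and tested in isolation. */
#include <stdio.h>

#define GEM_NO_BACKING_STORE	(1 << 0)
#define GEM_GTT_UC		(1 << 1)
#define GEM_GTT_WC		(1 << 2)

int main(void)
{
	unsigned int flags = GEM_NO_BACKING_STORE | GEM_GTT_WC;

	printf("WC set: %d, UC set: %d\n",
	       !!(flags & GEM_GTT_WC), !!(flags & GEM_GTT_UC));
	return 0;
}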
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 40b5ca8a1b1f..25084a052a1e 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -101,6 +101,7 @@
* - add FATTR_CTIME
* - add ctime and ctimensec to fuse_setattr_in
* - add FUSE_RENAME2 request
+ * - add FUSE_NO_OPEN_SUPPORT flag
*/
#ifndef _LINUX_FUSE_H
@@ -229,6 +230,7 @@ struct fuse_file_lock {
* FUSE_READDIRPLUS_AUTO: adaptive readdirplus
* FUSE_ASYNC_DIO: asynchronous direct I/O submission
* FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
+ * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -247,6 +249,7 @@ struct fuse_file_lock {
#define FUSE_READDIRPLUS_AUTO (1 << 14)
#define FUSE_ASYNC_DIO (1 << 15)
#define FUSE_WRITEBACK_CACHE (1 << 16)
+#define FUSE_NO_OPEN_SUPPORT (1 << 17)
/**
* CUSE INIT request/reply flags
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index b0393209679b..eaad58b5be4a 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -19,6 +19,7 @@
/* VIDCON0 */
#define VIDCON0 0x00
+#define VIDCON0_DSI_EN (1 << 30)
#define VIDCON0_INTERLACE (1 << 29)
#define VIDCON0_VIDOUT_MASK (0x7 << 26)
#define VIDCON0_VIDOUT_SHIFT 26
@@ -355,7 +356,7 @@
#define VIDINTCON0_INT_ENABLE (1 << 0)
#define VIDINTCON1 0x134
-#define VIDINTCON1_INT_I180 (1 << 2)
+#define VIDINTCON1_INT_I80 (1 << 2)
#define VIDINTCON1_INT_FRAME (1 << 1)
#define VIDINTCON1_INT_FIFO (1 << 0)
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index a5af2a26d94f..5c1aba154b64 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -170,6 +170,7 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
unmap->dev_bus_addr = 0;
}
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
void **__shared);
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 35536d9c0964..76768ee812b2 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -220,9 +220,16 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
endif
+config ARCH_SUPPORTS_ATOMIC_RMW
+ bool
+
config MUTEX_SPIN_ON_OWNER
def_bool y
- depends on SMP && !DEBUG_MUTEXES
+ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
+
+config RWSEM_SPIN_ON_OWNER
+ def_bool y
+ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
config ARCH_USE_QUEUE_RWLOCK
bool
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7868fc3c0bc5..70776aec2562 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1648,10 +1648,13 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
int flags, const char *unused_dev_name,
void *data)
{
+ struct super_block *pinned_sb = NULL;
+ struct cgroup_subsys *ss;
struct cgroup_root *root;
struct cgroup_sb_opts opts;
struct dentry *dentry;
int ret;
+ int i;
bool new_sb;
/*
@@ -1677,6 +1680,27 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
goto out_unlock;
}
+ /*
+ * Destruction of cgroup root is asynchronous, so subsystems may
+ * still be dying after the previous unmount. Let's drain the
+ * dying subsystems. We just need to ensure that the ones
+ * unmounted previously finish dying and don't care about new ones
+ * starting. Testing ref liveness is good enough.
+ */
+ for_each_subsys(ss, i) {
+ if (!(opts.subsys_mask & (1 << i)) ||
+ ss->root == &cgrp_dfl_root)
+ continue;
+
+ if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
+ mutex_unlock(&cgroup_mutex);
+ msleep(10);
+ ret = restart_syscall();
+ goto out_free;
+ }
+ cgroup_put(&ss->root->cgrp);
+ }
+
for_each_root(root) {
bool name_match = false;
@@ -1717,15 +1741,23 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
}
/*
- * A root's lifetime is governed by its root cgroup.
- * tryget_live failure indicate that the root is being
- * destroyed. Wait for destruction to complete so that the
- * subsystems are free. We can use wait_queue for the wait
- * but this path is super cold. Let's just sleep for a bit
- * and retry.
+ * We want to reuse @root whose lifetime is governed by its
+ * ->cgrp. Let's check whether @root is alive and keep it
+ * that way. As cgroup_kill_sb() can happen anytime, we
+ * want to block it by pinning the sb so that @root doesn't
+ * get killed before mount is complete.
+ *
+ * With the sb pinned, tryget_live can reliably indicate
+ * whether @root can be reused. If it's being killed,
+ * drain it. We can use wait_queue for the wait but this
+ * path is super cold. Let's just sleep a bit and retry.
*/
- if (!percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
+ pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
+ if (IS_ERR(pinned_sb) ||
+ !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
mutex_unlock(&cgroup_mutex);
+ if (!IS_ERR_OR_NULL(pinned_sb))
+ deactivate_super(pinned_sb);
msleep(10);
ret = restart_syscall();
goto out_free;
@@ -1770,6 +1802,16 @@ out_free:
CGROUP_SUPER_MAGIC, &new_sb);
if (IS_ERR(dentry) || !new_sb)
cgroup_put(&root->cgrp);
+
+ /*
+ * If @pinned_sb, we're reusing an existing root and holding an
+ * extra ref on its sb. Mount is complete. Put the extra ref.
+ */
+ if (pinned_sb) {
+ WARN_ON(new_sb);
+ deactivate_super(pinned_sb);
+ }
+
return dentry;
}
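The cgroup_mount() changes above follow a pin/tryget/retry shape: if the root is dying, drop the locks, sleep briefly and restart the whole operation from the top. A generic, purely illustrative model of that retry loop; every name here is invented.

/* Retry-with-backoff acquisition of a possibly-dying resource. */
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int root_alive = 1;	/* would flip to 0 while dying */

static int tryget_live(void)
{
	return atomic_load(&root_alive);
}

static int mount_once(void)
{
	if (!tryget_live())
		return -1;	/* caller must retry from scratch */
	/* ... reuse the existing root ... */
	return 0;
}

int main(void)
{
	while (mount_once() < 0)
		usleep(10 * 1000);	/* mirrors msleep(10) + restart */
	printf("mounted\n");
	return 0;
}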
@@ -3328,7 +3370,7 @@ bool css_has_online_children(struct cgroup_subsys_state *css)
rcu_read_lock();
css_for_each_child(child, css) {
- if (css->flags & CSS_ONLINE) {
+ if (child->flags & CSS_ONLINE) {
ret = true;
break;
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f6b33c696224..116a4164720a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1181,7 +1181,13 @@ done:
int current_cpuset_is_being_rebound(void)
{
- return task_cs(current) == cpuset_being_rebound;
+ int ret;
+
+ rcu_read_lock();
+ ret = task_cs(current) == cpuset_being_rebound;
+ rcu_read_unlock();
+
+ return ret;
}
static int update_relax_domain_level(struct cpuset *cs, s64 val)
@@ -1617,7 +1623,17 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
* resources, wait for the previously scheduled operations before
* proceeding, so that we don't end up repeatedly removing tasks added
* after execution capability is restored.
+ *
+ * cpuset_hotplug_work calls back into cgroup core via
+ * cgroup_transfer_tasks() and waiting for it from a cgroupfs
+ * operation like this one can lead to a deadlock through kernfs
+ * active_ref protection. Let's break the protection. Losing the
+ * protection is okay as we check whether @cs is online after
+ * grabbing cpuset_mutex anyway. This only happens on the legacy
+ * hierarchies.
*/
+ css_get(&cs->css);
+ kernfs_break_active_protection(of->kn);
flush_work(&cpuset_hotplug_work);
mutex_lock(&cpuset_mutex);
@@ -1645,6 +1661,8 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
free_trial_cpuset(trialcs);
out_unlock:
mutex_unlock(&cpuset_mutex);
+ kernfs_unbreak_active_protection(of->kn);
+ css_put(&cs->css);
return retval ?: nbytes;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a33d9a2bcbd7..6b17ac1b0c2a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2320,7 +2320,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
next_parent = rcu_dereference(next_ctx->parent_ctx);
/* If neither context have a parent context; they cannot be clones. */
- if (!parent && !next_parent)
+ if (!parent || !next_parent)
goto unlock;
if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
@@ -7458,7 +7458,19 @@ __perf_event_exit_task(struct perf_event *child_event,
struct perf_event_context *child_ctx,
struct task_struct *child)
{
- perf_remove_from_context(child_event, true);
+ /*
+ * Do not destroy the 'original' grouping; because of the context
+ * switch optimization the original events could've ended up in a
+ * random child task.
+ *
+ * If we were to destroy the original group, all group related
+ * operations would cease to function properly after this random
+ * child dies.
+ *
+ * Do destroy all inherited groups; we don't care about those,
+ * and being thorough is better.
+ */
+ perf_remove_from_context(child_event, !!child_event->parent);
/*
* It can happen that the parent exits first, and has events
@@ -7474,7 +7486,7 @@ __perf_event_exit_task(struct perf_event *child_event,
static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
struct perf_event *child_event, *next;
- struct perf_event_context *child_ctx;
+ struct perf_event_context *child_ctx, *parent_ctx;
unsigned long flags;
if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7499,6 +7511,15 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
raw_spin_lock(&child_ctx->lock);
task_ctx_sched_out(child_ctx);
child->perf_event_ctxp[ctxn] = NULL;
+
+ /*
+ * In order to avoid freeing: child_ctx->parent_ctx->task
+ * under perf_event_context::lock, grab another reference.
+ */
+ parent_ctx = child_ctx->parent_ctx;
+ if (parent_ctx)
+ get_ctx(parent_ctx);
+
/*
* If this context is a clone; unclone it so it can't get
* swapped to another process while we're removing all
@@ -7509,6 +7530,13 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
+ * Now that we no longer hold perf_event_context::lock, drop
+ * our extra child_ctx->parent_ctx reference.
+ */
+ if (parent_ctx)
+ put_ctx(parent_ctx);
+
+ /*
* Report the task dead after unscheduling the events so that we
* won't get any samples after PERF_RECORD_EXIT. We can however still
* get a few PERF_RECORD_READ events.
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 369f41a94124..4b8f0c925884 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -33,6 +33,7 @@
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
+#include <linux/hugetlb.h>
#include <asm/page.h>
#include <asm/uaccess.h>
@@ -1619,6 +1620,9 @@ static int __init crash_save_vmcoreinfo_init(void)
#endif
VMCOREINFO_NUMBER(PG_head_mask);
VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
+#ifdef CONFIG_HUGETLBFS
+ VMCOREINFO_SYMBOL(free_huge_page);
+#endif
arch_crash_save_vmcoreinfo();
update_vmcoreinfo_note();
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3214289df5a7..734e9a7d280b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2037,19 +2037,23 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
{
unsigned long *iter;
struct kprobe_blacklist_entry *ent;
- unsigned long offset = 0, size = 0;
+ unsigned long entry, offset = 0, size = 0;
for (iter = start; iter < end; iter++) {
- if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) {
- pr_err("Failed to find blacklist %p\n", (void *)*iter);
+ entry = arch_deref_entry_point((void *)*iter);
+
+ if (!kernel_text_address(entry) ||
+ !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+ pr_err("Failed to find blacklist at %p\n",
+ (void *)entry);
continue;
}
ent = kmalloc(sizeof(*ent), GFP_KERNEL);
if (!ent)
return -ENOMEM;
- ent->start_addr = *iter;
- ent->end_addr = *iter + size;
+ ent->start_addr = entry;
+ ent->end_addr = entry + size;
INIT_LIST_HEAD(&ent->list);
list_add_tail(&ent->list, &kprobe_blacklist);
}
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index 838dc9e00669..be9ee1559fca 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -14,21 +14,47 @@
* called from interrupt context and we have preemption disabled while
* spinning.
*/
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
+
+/*
+ * We use the value 0 to represent "no CPU", thus the encoded value
+ * will be the CPU number incremented by 1.
+ */
+static inline int encode_cpu(int cpu_nr)
+{
+ return cpu_nr + 1;
+}
+
+static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
+{
+ int cpu_nr = encoded_cpu_val - 1;
+
+ return per_cpu_ptr(&osq_node, cpu_nr);
+}
/*
* Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
* Can return NULL in case we were the last queued and we updated @lock instead.
*/
-static inline struct optimistic_spin_queue *
-osq_wait_next(struct optimistic_spin_queue **lock,
- struct optimistic_spin_queue *node,
- struct optimistic_spin_queue *prev)
+static inline struct optimistic_spin_node *
+osq_wait_next(struct optimistic_spin_queue *lock,
+ struct optimistic_spin_node *node,
+ struct optimistic_spin_node *prev)
{
- struct optimistic_spin_queue *next = NULL;
+ struct optimistic_spin_node *next = NULL;
+ int curr = encode_cpu(smp_processor_id());
+ int old;
+
+ /*
+ * If there is a prev node in the queue, then the 'old' value will be
+ * the prev node's CPU #; else it's set to OSQ_UNLOCKED_VAL, since if
+ * we're currently last in the queue, the queue becomes empty.
+ */
+ old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
for (;;) {
- if (*lock == node && cmpxchg(lock, node, prev) == node) {
+ if (atomic_read(&lock->tail) == curr &&
+ atomic_cmpxchg(&lock->tail, curr, old) == curr) {
/*
* We were the last queued, we moved @lock back. @prev
* will now observe @lock and will complete its
@@ -59,18 +85,23 @@ osq_wait_next(struct optimistic_spin_queue **lock,
return next;
}
-bool osq_lock(struct optimistic_spin_queue **lock)
+bool osq_lock(struct optimistic_spin_queue *lock)
{
- struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
- struct optimistic_spin_queue *prev, *next;
+ struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
+ struct optimistic_spin_node *prev, *next;
+ int curr = encode_cpu(smp_processor_id());
+ int old;
node->locked = 0;
node->next = NULL;
+ node->cpu = curr;
- node->prev = prev = xchg(lock, node);
- if (likely(prev == NULL))
+ old = atomic_xchg(&lock->tail, curr);
+ if (old == OSQ_UNLOCKED_VAL)
return true;
+ prev = decode_cpu(old);
+ node->prev = prev;
ACCESS_ONCE(prev->next) = node;
/*
@@ -149,20 +180,21 @@ unqueue:
return false;
}
-void osq_unlock(struct optimistic_spin_queue **lock)
+void osq_unlock(struct optimistic_spin_queue *lock)
{
- struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
- struct optimistic_spin_queue *next;
+ struct optimistic_spin_node *node, *next;
+ int curr = encode_cpu(smp_processor_id());
/*
* Fast path for the uncontended case.
*/
- if (likely(cmpxchg(lock, node, NULL) == node))
+ if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
return;
/*
* Second most likely case.
*/
+ node = this_cpu_ptr(&osq_node);
next = xchg(&node->next, NULL);
if (next) {
ACCESS_ONCE(next->locked) = 1;
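With the tail no longer a pointer, osq_lock()/osq_unlock() above recover the queue node from the encoded CPU number via per-CPU data. A userspace sketch of the encode/decode step, using a plain array in place of per-CPU storage; names and sizes are assumptions.

/* encode_cpu()/decode_cpu() model: tail stores cpu+1 so that 0 can
 * mean "queue empty", and the node is found again from the CPU #. */
#include <stdio.h>

#define NR_CPUS 4
#define UNLOCKED_VAL 0

struct spin_node { int locked; int cpu; };

static struct spin_node nodes[NR_CPUS];	/* stands in for per-CPU data */

static int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static struct spin_node *decode_cpu(int encoded)
{
	return &nodes[encoded - 1];
}

int main(void)
{
	int tail = encode_cpu(2);	/* CPU 2 is last in the queue */
	struct spin_node *n = decode_cpu(tail);

	n->cpu = tail;			/* node remembers its encoded value */
	printf("tail %d -> node for CPU %d\n", tail, n->cpu - 1);
	printf("empty? %d\n", tail == UNLOCKED_VAL);
	return 0;
}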
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index a2dbac4aca6b..74356dc0ce29 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -118,12 +118,13 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
* mutex_lock()/rwsem_down_{read,write}() etc.
*/
-struct optimistic_spin_queue {
- struct optimistic_spin_queue *next, *prev;
+struct optimistic_spin_node {
+ struct optimistic_spin_node *next, *prev;
int locked; /* 1 if lock acquired */
+ int cpu; /* encoded CPU # value */
};
-extern bool osq_lock(struct optimistic_spin_queue **lock);
-extern void osq_unlock(struct optimistic_spin_queue **lock);
+extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern void osq_unlock(struct optimistic_spin_queue *lock);
#endif /* __LINUX_MCS_SPINLOCK_H */
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index bc73d33c6760..acca2c1a3c5e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -60,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
INIT_LIST_HEAD(&lock->wait_list);
mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- lock->osq = NULL;
+ osq_lock_init(&lock->osq);
#endif
debug_mutex_init(lock, name, key);
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 9be8a9144978..2c93571162cb 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -26,7 +26,7 @@ int rwsem_is_locked(struct rw_semaphore *sem)
unsigned long flags;
if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
- ret = (sem->activity != 0);
+ ret = (sem->count != 0);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
return ret;
@@ -46,7 +46,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
- sem->activity = 0;
+ sem->count = 0;
raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
@@ -95,7 +95,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
waiter = list_entry(next, struct rwsem_waiter, list);
} while (waiter->type != RWSEM_WAITING_FOR_WRITE);
- sem->activity += woken;
+ sem->count += woken;
out:
return sem;
@@ -126,9 +126,9 @@ void __sched __down_read(struct rw_semaphore *sem)
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+ if (sem->count >= 0 && list_empty(&sem->wait_list)) {
/* granted */
- sem->activity++;
+ sem->count++;
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
goto out;
}
@@ -170,9 +170,9 @@ int __down_read_trylock(struct rw_semaphore *sem)
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+ if (sem->count >= 0 && list_empty(&sem->wait_list)) {
/* granted */
- sem->activity++;
+ sem->count++;
ret = 1;
}
@@ -206,7 +206,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
* itself to sleep and wait for the system or someone else at the
* head of the wait list to wake it up.
*/
- if (sem->activity == 0)
+ if (sem->count == 0)
break;
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -214,7 +214,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
raw_spin_lock_irqsave(&sem->wait_lock, flags);
}
/* got the lock */
- sem->activity = -1;
+ sem->count = -1;
list_del(&waiter.list);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -235,9 +235,9 @@ int __down_write_trylock(struct rw_semaphore *sem)
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (sem->activity == 0) {
+ if (sem->count == 0) {
/* got the lock */
- sem->activity = -1;
+ sem->count = -1;
ret = 1;
}
@@ -255,7 +255,7 @@ void __up_read(struct rw_semaphore *sem)
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- if (--sem->activity == 0 && !list_empty(&sem->wait_list))
+ if (--sem->count == 0 && !list_empty(&sem->wait_list))
sem = __rwsem_wake_one_writer(sem);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -270,7 +270,7 @@ void __up_write(struct rw_semaphore *sem)
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- sem->activity = 0;
+ sem->count = 0;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 1);
@@ -287,7 +287,7 @@ void __downgrade_write(struct rw_semaphore *sem)
raw_spin_lock_irqsave(&sem->wait_lock, flags);
- sem->activity = 1;
+ sem->count = 1;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 0);
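The renamed count field keeps the spinlock-based rwsem convention used throughout the hunks above: 0 means free, a positive value counts active readers, and -1 marks a single writer. A toy, single-threaded model of those state transitions (not the kernel locking code):

/* sem->count convention: 0 = free, +N = N readers, -1 = one writer. */
#include <stdio.h>

static int count;

static int try_read(void)
{
	if (count >= 0) {		/* no writer active */
		count++;
		return 1;
	}
	return 0;
}

static int try_write(void)
{
	if (count == 0) {		/* completely idle */
		count = -1;
		return 1;
	}
	return 0;
}

int main(void)
{
	int r1 = try_read();
	int r2 = try_read();
	int w = try_write();		/* fails: two readers hold it */

	printf("r1=%d r2=%d w=%d\n", r1, r2, w);
	count -= 2;			/* both readers release */
	printf("w=%d count=%d\n", try_write(), count);
	return 0;
}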
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index dacc32142fcc..a2391ac135c8 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -82,9 +82,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
sem->count = RWSEM_UNLOCKED_VALUE;
raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
sem->owner = NULL;
- sem->osq = NULL;
+ osq_lock_init(&sem->osq);
#endif
}
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
return false;
}
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
* Try to acquire write lock before the writer has been put on wait queue.
*/
@@ -285,10 +285,10 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
struct task_struct *owner;
- bool on_cpu = true;
+ bool on_cpu = false;
if (need_resched())
- return 0;
+ return false;
rcu_read_lock();
owner = ACCESS_ONCE(sem->owner);
@@ -297,9 +297,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
rcu_read_unlock();
/*
- * If sem->owner is not set, the rwsem owner may have
- * just acquired it and not set the owner yet or the rwsem
- * has been released.
+ * If sem->owner is not set, yet we have just recently entered the
+ * slowpath, then there is a possibility that reader(s) may hold the lock.
+ * To be safe, avoid spinning in these situations.
*/
return on_cpu;
}
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 42f806de49d4..e2d3bc7f03b4 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -12,7 +12,7 @@
#include <linux/atomic.h>
-#if defined(CONFIG_SMP) && defined(CONFIG_RWSEM_XCHGADD_ALGORITHM)
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
sem->owner = current;
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0ca8d83e2369..4ee194eb524b 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -186,6 +186,7 @@ void thaw_processes(void)
printk("Restarting tasks ... ");
+ __usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();
read_lock(&tasklist_lock);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4dd8822f732a..ed35a4790afe 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -306,7 +306,7 @@ int suspend_devices_and_enter(suspend_state_t state)
error = suspend_ops->begin(state);
if (error)
goto Close;
- } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) {
+ } else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) {
error = freeze_ops->begin();
if (error)
goto Close;
@@ -335,7 +335,7 @@ int suspend_devices_and_enter(suspend_state_t state)
Close:
if (need_suspend_ops(state) && suspend_ops->end)
suspend_ops->end();
- else if (state == PM_SUSPEND_FREEZE && freeze_ops->end)
+ else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
freeze_ops->end();
return error;
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 7fa34f86e5ba..948a7693748e 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -18,7 +18,7 @@
* Copyright (C) IBM Corporation, 2005, 2006
*
* Authors: Paul E. McKenney <[email protected]>
- * Josh Triplett <[email protected]>
+ * Josh Triplett <[email protected]>
*
* See also: Documentation/RCU/torture.txt
*/
@@ -51,7 +51,7 @@
#include <linux/torture.h>
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney <[email protected]> and Josh Triplett <[email protected]>");
+MODULE_AUTHOR("Paul E. McKenney <[email protected]> and Josh Triplett <[email protected]>");
torture_param(int, fqs_duration, 0,
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f1ba77363fbb..625d0b0cd75a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -206,6 +206,70 @@ void rcu_bh_qs(int cpu)
rdp->passed_quiesce = 1;
}
+static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
+
+static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+ .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
+ .dynticks = ATOMIC_INIT(1),
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+ .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
+ .dynticks_idle = ATOMIC_INIT(1),
+#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+};
+
+/*
+ * Let the RCU core know that this CPU has gone through the scheduler,
+ * which is a quiescent state. This is called when the need for a
+ * quiescent state is urgent, so we burn an atomic operation and full
+ * memory barriers to let the RCU core know about it, regardless of what
+ * this CPU might (or might not) do in the near future.
+ *
+ * We inform the RCU core by emulating a zero-duration dyntick-idle
+ * period, which we in turn do by incrementing the ->dynticks counter
+ * by two.
+ */
+static void rcu_momentary_dyntick_idle(void)
+{
+ unsigned long flags;
+ struct rcu_data *rdp;
+ struct rcu_dynticks *rdtp;
+ int resched_mask;
+ struct rcu_state *rsp;
+
+ local_irq_save(flags);
+
+ /*
+ * Yes, we can lose flag-setting operations. This is OK, because
+ * the flag will be set again after some delay.
+ */
+ resched_mask = raw_cpu_read(rcu_sched_qs_mask);
+ raw_cpu_write(rcu_sched_qs_mask, 0);
+
+ /* Find the flavor that needs a quiescent state. */
+ for_each_rcu_flavor(rsp) {
+ rdp = raw_cpu_ptr(rsp->rda);
+ if (!(resched_mask & rsp->flavor_mask))
+ continue;
+ smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
+ if (ACCESS_ONCE(rdp->mynode->completed) !=
+ ACCESS_ONCE(rdp->cond_resched_completed))
+ continue;
+
+ /*
+ * Pretend to be momentarily idle for the quiescent state.
+ * This allows the grace-period kthread to record the
+ * quiescent state, with no need for this CPU to do anything
+ * further.
+ */
+ rdtp = this_cpu_ptr(&rcu_dynticks);
+ smp_mb__before_atomic(); /* Earlier stuff before QS. */
+ atomic_add(2, &rdtp->dynticks); /* QS. */
+ smp_mb__after_atomic(); /* Later stuff after QS. */
+ break;
+ }
+ local_irq_restore(flags);
+}
+
/*
* Note a context switch. This is a quiescent state for RCU-sched,
* and requires special handling for preemptible RCU.
@@ -216,19 +280,12 @@ void rcu_note_context_switch(int cpu)
trace_rcu_utilization(TPS("Start context switch"));
rcu_sched_qs(cpu);
rcu_preempt_note_context_switch(cpu);
+ if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+ rcu_momentary_dyntick_idle();
trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
-static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
- .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
- .dynticks = ATOMIC_INIT(1),
-#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
- .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
- .dynticks_idle = ATOMIC_INIT(1),
-#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-};
-
static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
static long qhimark = 10000; /* If this many pending, ignore blimit. */
static long qlowmark = 100; /* Once only this many pending, use blimit. */
@@ -243,6 +300,13 @@ static ulong jiffies_till_next_fqs = ULONG_MAX;
module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);
+/*
+ * How long the grace period must be before we start recruiting
+ * quiescent-state help from rcu_note_context_switch().
+ */
+static ulong jiffies_till_sched_qs = HZ / 20;
+module_param(jiffies_till_sched_qs, ulong, 0644);
+
static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
struct rcu_data *rdp);
static void force_qs_rnp(struct rcu_state *rsp,
@@ -853,6 +917,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
bool *isidle, unsigned long *maxj)
{
unsigned int curr;
+ int *rcrmp;
unsigned int snap;
curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
@@ -893,27 +958,43 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
}
/*
- * There is a possibility that a CPU in adaptive-ticks state
- * might run in the kernel with the scheduling-clock tick disabled
- * for an extended time period. Invoke rcu_kick_nohz_cpu() to
- * force the CPU to restart the scheduling-clock tick in this
- * CPU is in this state.
- */
- rcu_kick_nohz_cpu(rdp->cpu);
-
- /*
- * Alternatively, the CPU might be running in the kernel
- * for an extended period of time without a quiescent state.
- * Attempt to force the CPU through the scheduler to gain the
- * needed quiescent state, but only if the grace period has gone
- * on for an uncommonly long time. If there are many stuck CPUs,
- * we will beat on the first one until it gets unstuck, then move
- * to the next. Only do this for the primary flavor of RCU.
+ * A CPU running for an extended time within the kernel can
+ * delay RCU grace periods. When the CPU is in NO_HZ_FULL mode,
+ * even context-switching back and forth between a pair of
+ * in-kernel CPU-bound tasks cannot advance grace periods.
+ * So if the grace period is old enough, make the CPU pay attention.
+ * Note that the unsynchronized assignments to the per-CPU
+ * rcu_sched_qs_mask variable are safe. Yes, setting of
+ * bits can be lost, but they will be set again on the next
+ * force-quiescent-state pass. So lost bit sets do not result
+ * in incorrect behavior, merely in a grace period lasting
+ * a few jiffies longer than it might otherwise. Because
+ * there are at most four threads involved, and because the
+ * updates are only once every few jiffies, the probability of
+ * lossage (and thus of slight grace-period extension) is
+ * quite low.
+ *
+ * Note that if the jiffies_till_sched_qs boot/sysfs parameter
+ * is set too high, we override with half of the RCU CPU stall
+ * warning delay.
*/
- if (rdp->rsp == rcu_state_p &&
+ rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
+ if (ULONG_CMP_GE(jiffies,
+ rdp->rsp->gp_start + jiffies_till_sched_qs) ||
ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
- rdp->rsp->jiffies_resched += 5;
- resched_cpu(rdp->cpu);
+ if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
+ ACCESS_ONCE(rdp->cond_resched_completed) =
+ ACCESS_ONCE(rdp->mynode->completed);
+ smp_mb(); /* ->cond_resched_completed before *rcrmp. */
+ ACCESS_ONCE(*rcrmp) =
+ ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
+ resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
+ rdp->rsp->jiffies_resched += 5; /* Enable beating. */
+ } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
+ /* Time to beat on that CPU again! */
+ resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
+ rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
+ }
}
return 0;
@@ -3491,6 +3572,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
"rcu_node_fqs_1",
"rcu_node_fqs_2",
"rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */
+ static u8 fl_mask = 0x1;
int cpustride = 1;
int i;
int j;
@@ -3509,6 +3591,8 @@ static void __init rcu_init_one(struct rcu_state *rsp,
for (i = 1; i < rcu_num_lvls; i++)
rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
rcu_init_levelspread(rsp);
+ rsp->flavor_mask = fl_mask;
+ fl_mask <<= 1;
/* Initialize the elements themselves, starting from the leaves. */
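rcu_momentary_dyntick_idle(), added earlier in this file, leans on the ->dynticks parity convention: an even value means idle, and any change since a snapshot implies a quiescent state passed, so adding 2 emulates a zero-duration idle period while the CPU stays busy. A hedged userspace model of that check:

/* Parity/snapshot model of the ->dynticks counter; illustrative only. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint dynticks = 1;	/* odd: CPU starts non-idle */

static int passed_quiescent(unsigned int snap)
{
	unsigned int cur = atomic_load(&dynticks);

	/* even now (idle), or changed since the snapshot: QS seen */
	return (cur & 1) == 0 || cur != snap;
}

int main(void)
{
	unsigned int snap = atomic_load(&dynticks);

	printf("before: qs=%d\n", passed_quiescent(snap));	/* 0 */
	atomic_fetch_add(&dynticks, 2);	/* momentary "idle" period */
	printf("after:  qs=%d\n", passed_quiescent(snap));	/* 1 */
	return 0;
}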
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bf2c1e669691..0f69a79c5b7d 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -307,6 +307,9 @@ struct rcu_data {
/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
unsigned long offline_fqs; /* Kicked due to being offline. */
+ unsigned long cond_resched_completed;
+ /* Grace period that needs help */
+ /* from cond_resched(). */
/* 5) __rcu_pending() statistics. */
unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */
@@ -392,6 +395,7 @@ struct rcu_state {
struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */
u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
+ u8 flavor_mask; /* bit in flavor mask. */
struct rcu_data __percpu *rda; /* pointer to per-CPU rcu_data. */
void (*call)(struct rcu_head *head, /* call_rcu() flavor. */
void (*func)(struct rcu_head *head));
@@ -563,7 +567,7 @@ static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
-static void rcu_kick_nohz_cpu(int cpu);
+static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index cbc2c45265e2..02ac0fb186b8 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2404,7 +2404,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
* if an adaptive-ticks CPU is failing to respond to the current grace
* period and has not been idle from an RCU perspective, kick it.
*/
-static void rcu_kick_nohz_cpu(int cpu)
+static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
if (tick_nohz_full_cpu(cpu))
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index a2aeb4df0f60..bc7883570530 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -200,12 +200,12 @@ void wait_rcu_gp(call_rcu_func_t crf)
EXPORT_SYMBOL_GPL(wait_rcu_gp);
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-static inline void debug_init_rcu_head(struct rcu_head *head)
+void init_rcu_head(struct rcu_head *head)
{
debug_object_init(head, &rcuhead_debug_descr);
}
-static inline void debug_rcu_head_free(struct rcu_head *head)
+void destroy_rcu_head(struct rcu_head *head)
{
debug_object_free(head, &rcuhead_debug_descr);
}
@@ -350,21 +350,3 @@ static int __init check_cpu_stall_init(void)
early_initcall(check_cpu_stall_init);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
-
-/*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-DEFINE_PER_CPU(int, rcu_cond_resched_count);
-
-/*
- * Report a set of RCU quiescent states, for use by cond_resched()
- * and friends. Out of line due to being called infrequently.
- */
-void rcu_resched(void)
-{
- preempt_disable();
- __this_cpu_write(rcu_cond_resched_count, 0);
- rcu_note_context_switch(smp_processor_id());
- preempt_enable();
-}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3bdf01b494fe..bc1638b33449 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4147,7 +4147,6 @@ static void __cond_resched(void)
int __sched _cond_resched(void)
{
- rcu_cond_resched();
if (should_resched()) {
__cond_resched();
return 1;
@@ -4166,18 +4165,15 @@ EXPORT_SYMBOL(_cond_resched);
*/
int __cond_resched_lock(spinlock_t *lock)
{
- bool need_rcu_resched = rcu_should_resched();
int resched = should_resched();
int ret = 0;
lockdep_assert_held(lock);
- if (spin_needbreak(lock) || resched || need_rcu_resched) {
+ if (spin_needbreak(lock) || resched) {
spin_unlock(lock);
if (resched)
__cond_resched();
- else if (unlikely(need_rcu_resched))
- rcu_resched();
else
cpu_relax();
ret = 1;
@@ -4191,7 +4187,6 @@ int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
- rcu_cond_resched(); /* BH disabled OK, just recording QSes. */
if (should_resched()) {
local_bh_enable();
__cond_resched();
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 695f9773bb60..627b3c34b821 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -608,7 +608,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
avg_atom = p->se.sum_exec_runtime;
if (nr_switches)
- do_div(avg_atom, nr_switches);
+ avg_atom = div64_ul(avg_atom, nr_switches);
else
avg_atom = -1LL;
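The do_div() to div64_ul() change above matters because do_div() takes a 32-bit divisor, so a nr_switches value that doesn't fit in 32 bits would be silently truncated. A userspace illustration of the two behaviours; the kernel macros themselves are not used here.

/* 32-bit-divisor truncation vs full 64-bit division. */
#include <stdint.h>
#include <stdio.h>

static uint64_t div_like_do_div(uint64_t n, uint64_t divisor)
{
	return n / (uint32_t)divisor;	/* divisor truncated to 32 bits */
}

static uint64_t div64_ul_model(uint64_t n, uint64_t divisor)
{
	return n / divisor;		/* full-width division */
}

int main(void)
{
	uint64_t runtime = 1ULL << 40;
	uint64_t switches = (1ULL << 32) + 2;	/* doesn't fit in u32 */

	printf("truncated: %llu\n",
	       (unsigned long long)div_like_do_div(runtime, switches));
	printf("full:      %llu\n",
	       (unsigned long long)div64_ul_model(runtime, switches));
	return 0;
}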
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 88c9c65a430d..fe75444ae7ec 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -585,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
struct itimerspec *new_setting,
struct itimerspec *old_setting)
{
+ ktime_t exp;
+
if (!rtcdev)
return -ENOTSUPP;
+ if (flags & ~TIMER_ABSTIME)
+ return -EINVAL;
+
if (old_setting)
alarm_timer_get(timr, old_setting);
@@ -597,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
/* start the timer */
timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
- alarm_start(&timr->it.alarm.alarmtimer,
- timespec_to_ktime(new_setting->it_value));
+ exp = timespec_to_ktime(new_setting->it_value);
+ /* Convert (if necessary) to absolute time */
+ if (flags != TIMER_ABSTIME) {
+ ktime_t now;
+
+ now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
+ exp = ktime_add(now, exp);
+ }
+
+ alarm_start(&timr->it.alarm.alarmtimer, exp);
return 0;
}
@@ -730,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
if (!alarmtimer_get_rtcdev())
return -ENOTSUPP;
+ if (flags & ~TIMER_ABSTIME)
+ return -EINVAL;
+
if (!capable(CAP_WAKE_ALARM))
return -EPERM;
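The alarmtimer fixes above do two things: reject unknown flag bits with -EINVAL, then convert a relative expiry to an absolute one by adding "now" from the matching clock base. A POSIX-flavoured sketch of the same two steps; the types and flag name stand in for the kernel's ktime_t machinery.

/* Validate flags, then convert relative expiry to absolute. */
#include <stdio.h>
#include <time.h>

#define MY_TIMER_ABSTIME 0x01

static int set_timer(int flags, struct timespec exp)
{
	if (flags & ~MY_TIMER_ABSTIME)
		return -1;	/* mirrors the -EINVAL check */

	if (!(flags & MY_TIMER_ABSTIME)) {
		struct timespec now;

		clock_gettime(CLOCK_MONOTONIC, &now);
		exp.tv_sec += now.tv_sec;	/* exp = now + exp */
		exp.tv_nsec += now.tv_nsec;
		if (exp.tv_nsec >= 1000000000L) {
			exp.tv_sec++;
			exp.tv_nsec -= 1000000000L;
		}
	}
	printf("arming for %ld.%09ld\n", (long)exp.tv_sec, exp.tv_nsec);
	return 0;
}

int main(void)
{
	struct timespec rel = { .tv_sec = 1, .tv_nsec = 0 };

	return set_timer(0, rel);
}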
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index ad362c260ef4..9c94c19f1305 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -146,7 +146,8 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
/* Nothing to do if we already reached the limit */
if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
- printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
+ printk_deferred(KERN_WARNING
+ "CE: Reprogramming failure. Giving up\n");
dev->next_event.tv64 = KTIME_MAX;
return -ETIME;
}
@@ -159,9 +160,10 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
if (dev->min_delta_ns > MIN_DELTA_LIMIT)
dev->min_delta_ns = MIN_DELTA_LIMIT;
- printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
- dev->name ? dev->name : "?",
- (unsigned long long) dev->min_delta_ns);
+ printk_deferred(KERN_WARNING
+ "CE: %s increased min_delta_ns to %llu nsec\n",
+ dev->name ? dev->name : "?",
+ (unsigned long long) dev->min_delta_ns);
return 0;
}
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 445106d2c729..01d2d15aa662 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -191,7 +191,8 @@ void __init sched_clock_postinit(void)
static int sched_clock_suspend(void)
{
- sched_clock_poll(&sched_clock_timer);
+ update_sched_clock();
+ hrtimer_cancel(&sched_clock_timer);
cd.suspended = true;
return 0;
}
@@ -199,6 +200,7 @@ static int sched_clock_suspend(void)
static void sched_clock_resume(void)
{
cd.epoch_cyc = read_sched_clock();
+ hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
cd.suspended = false;
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5b372e3ed675..ac9d1dad630b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -265,12 +265,12 @@ static void update_ftrace_function(void)
func = ftrace_ops_list_func;
}
+ update_function_graph_func();
+
/* If there's no change, then do nothing more here */
if (ftrace_trace_function == func)
return;
- update_function_graph_func();
-
/*
* If we are using the list function, it doesn't care
* about the function_trace_ops.
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7c56c3d06943..ff7027199a9a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct ring_buffer_per_cpu *cpu_buffer;
struct rb_irq_work *work;
- if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
- (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
- return POLLIN | POLLRDNORM;
-
if (cpu == RING_BUFFER_ALL_CPUS)
work = &buffer->irq_work;
else {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f243444a3772..291397e66669 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -466,6 +466,12 @@ int __trace_puts(unsigned long ip, const char *str, int size)
struct print_entry *entry;
unsigned long irq_flags;
int alloc;
+ int pc;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ pc = preempt_count();
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
@@ -475,7 +481,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
local_save_flags(irq_flags);
buffer = global_trace.trace_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
- irq_flags, preempt_count());
+ irq_flags, pc);
if (!event)
return 0;
@@ -492,6 +498,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
entry->buf[size] = '\0';
__buffer_unlock_commit(buffer, event);
+ ftrace_trace_stack(buffer, irq_flags, 4, pc);
return size;
}
@@ -509,6 +516,12 @@ int __trace_bputs(unsigned long ip, const char *str)
struct bputs_entry *entry;
unsigned long irq_flags;
int size = sizeof(struct bputs_entry);
+ int pc;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ pc = preempt_count();
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
@@ -516,7 +529,7 @@ int __trace_bputs(unsigned long ip, const char *str)
local_save_flags(irq_flags);
buffer = global_trace.trace_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
- irq_flags, preempt_count());
+ irq_flags, pc);
if (!event)
return 0;
@@ -525,6 +538,7 @@ int __trace_bputs(unsigned long ip, const char *str)
entry->str = str;
__buffer_unlock_commit(buffer, event);
+ ftrace_trace_stack(buffer, irq_flags, 4, pc);
return 1;
}
@@ -809,7 +823,7 @@ static struct {
{ trace_clock_local, "local", 1 },
{ trace_clock_global, "global", 1 },
{ trace_clock_counter, "counter", 0 },
- { trace_clock_jiffies, "uptime", 1 },
+ { trace_clock_jiffies, "uptime", 0 },
{ trace_clock, "perf", 1 },
ARCH_TRACE_CLOCKS
};
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 26dc348332b7..57b67b1f24d1 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
/*
* trace_jiffy_clock(): Simply use jiffies as a clock counter.
+ * Note that this use of jiffies_64 is not completely safe on
+ * 32-bit systems. But the window is tiny, and the worst-case
+ * effect is an obviously bogus timestamp on a trace event -
+ * i.e. not life threatening.
*/
u64 notrace trace_clock_jiffies(void)
{
- u64 jiffy = jiffies - INITIAL_JIFFIES;
-
- /* Return nsecs */
- return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
+ return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
/*
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f99e0b3bca8c..2de53628689f 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -470,6 +470,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
list_del(&file->list);
remove_subsystem(file->system);
+ free_event_filter(file->filter);
kmem_cache_free(file_cachep, file);
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6203d2900877..35974ac69600 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3284,6 +3284,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
}
}
+ dev_set_uevent_suppress(&wq_dev->dev, false);
kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
return 0;
}
@@ -4879,7 +4880,7 @@ static void __init wq_numa_init(void)
BUG_ON(!tbl);
for_each_node(node)
- BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+ BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
node_online(node) ? node : NUMA_NO_NODE));
for_each_possible_cpu(cpu) {
diff --git a/lib/cpumask.c b/lib/cpumask.c
index c101230658eb..b6513a9f2892 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -191,7 +191,7 @@ int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
i %= num_online_cpus();
- if (!cpumask_of_node(numa_node)) {
+ if (numa_node == -1 || !cpumask_of_node(numa_node)) {
/* Use all online cpu's for non numa aware system */
cpumask_copy(mask, cpu_online_mask);
} else {
diff --git a/mm/filemap.c b/mm/filemap.c
index dafb06f70a09..900edfaf6df5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1031,18 +1031,21 @@ EXPORT_SYMBOL(find_lock_entry);
* @mapping: the address_space to search
* @offset: the page index
* @fgp_flags: FGP flags
- * @gfp_mask: gfp mask to use if a page is to be allocated
+ * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
+ * @radix_gfp_mask: gfp mask to use for radix tree node allocation
*
* Looks up the page cache slot at @mapping & @offset.
*
- * PCG flags modify how the page is returned
+ * FGP flags modify how the page is returned.
*
* FGP_ACCESSED: the page will be marked accessed
* FGP_LOCK: Page is returned locked
* FGP_CREAT: If page is not present then a new page is allocated using
- * @gfp_mask and added to the page cache and the VM's LRU
- * list. The page is returned locked and with an increased
- * refcount. Otherwise, %NULL is returned.
+ * @cache_gfp_mask and added to the page cache and the VM's LRU
+ * list. If radix tree nodes are allocated during page cache
+ * insertion then @radix_gfp_mask is used. The page is returned
+ * locked and with an increased refcount. Otherwise, %NULL is
+ * returned.
*
* If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
* if the GFP flags specified for FGP_CREAT are atomic.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2024bbd573d2..7a0a73d2fcff 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -856,7 +856,7 @@ struct hstate *size_to_hstate(unsigned long size)
return NULL;
}
-static void free_huge_page(struct page *page)
+void free_huge_page(struct page *page)
{
/*
* Can't pass hstate in here because it is called from the
@@ -2604,6 +2604,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
} else {
if (cow)
huge_ptep_set_wrprotect(src, addr, src_pte);
+ entry = huge_ptep_get(src_pte);
ptepage = pte_page(entry);
get_page(ptepage);
page_dup_rmap(ptepage);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a2c7bcb0e6eb..1f14a430c656 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5415,8 +5415,12 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
struct mem_cgroup_eventfd_list *ev;
+ spin_lock(&memcg_oom_lock);
+
list_for_each_entry(ev, &memcg->oom_notify, list)
eventfd_signal(ev->eventfd, 1);
+
+ spin_unlock(&memcg_oom_lock);
return 0;
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c6399e328931..a013bc94ebbe 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
if (av == NULL) /* Not actually mapped anymore */
return;
- pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ pgoff = page_to_pgoff(page);
read_lock(&tasklist_lock);
for_each_process (tsk) {
struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
mutex_lock(&mapping->i_mmap_mutex);
read_lock(&tasklist_lock);
for_each_process(tsk) {
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ pgoff_t pgoff = page_to_pgoff(page);
struct task_struct *t = task_early_kill(tsk, force_early);
if (!t)
@@ -895,7 +895,13 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
struct page *hpage = *hpagep;
struct page *ppage;
- if (PageReserved(p) || PageSlab(p) || !PageLRU(p))
+ /*
+ * Here we are interested only in user-mapped pages, so skip any
+ * other types of pages.
+ */
+ if (PageReserved(p) || PageSlab(p))
+ return SWAP_SUCCESS;
+ if (!(PageLRU(hpage) || PageHuge(p)))
return SWAP_SUCCESS;
/*
@@ -905,8 +911,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
if (!page_mapped(hpage))
return SWAP_SUCCESS;
- if (PageKsm(p))
+ if (PageKsm(p)) {
+ pr_err("MCE %#lx: can't handle KSM pages.\n", pfn);
return SWAP_FAIL;
+ }
if (PageSwapCache(p)) {
printk(KERN_ERR
@@ -1229,7 +1237,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
*/
if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
!= SWAP_SUCCESS) {
- printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
+ action_result(pfn, "unmapping failed", IGNORED);
res = -EBUSY;
goto out;
}
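The hunks above (and the matching ones in mm/rmap.c further down) replace three slightly different open-coded offset calculations with a single page_to_pgoff() helper. A sketch of what such a helper has to compute, inferred from the call sites it replaces rather than quoted from the patch:

	static inline pgoff_t page_to_pgoff(struct page *page)
	{
		/* hugetlbfs indexes in huge-page-sized units */
		if (unlikely(PageHeadHuge(page)))
			return page->index << compound_order(page);
		/* everyone else indexes in PAGE_CACHE_SIZE units */
		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	}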
diff --git a/mm/memory.c b/mm/memory.c
index d67fd9fcf1f2..8b44f765b645 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2758,23 +2758,18 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
update_mmu_cache(vma, address, pte);
}
-static unsigned long fault_around_bytes = 65536;
+static unsigned long fault_around_bytes = rounddown_pow_of_two(65536);
-/*
- * fault_around_pages() and fault_around_mask() round down fault_around_bytes
- * to nearest page order. It's what do_fault_around() expects to see.
- */
static inline unsigned long fault_around_pages(void)
{
- return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE;
+ return fault_around_bytes >> PAGE_SHIFT;
}
static inline unsigned long fault_around_mask(void)
{
- return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK;
+ return ~(fault_around_bytes - 1) & PAGE_MASK;
}
-
#ifdef CONFIG_DEBUG_FS
static int fault_around_bytes_get(void *data, u64 *val)
{
@@ -2782,11 +2777,19 @@ static int fault_around_bytes_get(void *data, u64 *val)
return 0;
}
+/*
+ * fault_around_pages() and fault_around_mask() expect fault_around_bytes
+ * rounded down to the nearest page order, which is what do_fault_around()
+ * expects to see.
+ */
static int fault_around_bytes_set(void *data, u64 val)
{
if (val / PAGE_SIZE > PTRS_PER_PTE)
return -EINVAL;
- fault_around_bytes = val;
+ if (val > PAGE_SIZE)
+ fault_around_bytes = rounddown_pow_of_two(val);
+ else
+ fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
@@ -2882,7 +2885,8 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* if page by the offset is not ready to be mapped (cold cache or
* something).
*/
- if (vma->vm_ops->map_pages && fault_around_pages() > 1) {
+ if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
+ fault_around_pages() > 1) {
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
do_fault_around(vma, address, pte, pgoff, flags);
if (!pte_same(*pte, orig_pte))
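Since fault_around_bytes is now kept as a power of two at all times, the hot-path helpers reduce to a plain shift and mask. A small standalone C model of the debugfs setter's rounding; the loop below stands in for the kernel's rounddown_pow_of_two() and the values are illustrative:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	/* Clear low set bits until a single one remains; undefined for 0,
	 * which is why the setter above special-cases val <= PAGE_SIZE.
	 */
	static unsigned long rounddown_pow_of_two(unsigned long n)
	{
		while (n & (n - 1))
			n &= n - 1;
		return n;
	}

	int main(void)
	{
		unsigned long val = 70000;	/* as if echoed into debugfs */
		unsigned long fab = val > PAGE_SIZE ?
				    rounddown_pow_of_two(val) : PAGE_SIZE;

		printf("bytes=%lu pages=%lu mask=%#lx\n",
		       fab, fab / PAGE_SIZE, ~(fab - 1) & PAGE_MASK);
		return 0;	/* prints bytes=65536 pages=16 ... */
	}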
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index eb58de19f815..8f5330d74f47 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2139,7 +2139,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
} else
*new = *old;
- rcu_read_lock();
if (current_cpuset_is_being_rebound()) {
nodemask_t mems = cpuset_mems_allowed(current);
if (new->flags & MPOL_F_REBINDING)
@@ -2147,7 +2146,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
else
mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
}
- rcu_read_unlock();
atomic_set(&new->refcnt, 1);
return new;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 9e0beaa91845..be6dbf995c0c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -988,9 +988,10 @@ out:
* it. Otherwise, putback_lru_page() will drop the reference grabbed
* during isolation.
*/
- if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+ if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+ ClearPageSwapBacked(newpage);
put_new_page(newpage, private);
- else
+ } else
putback_lru_page(newpage);
if (result) {
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 518e2c3f4c75..e0c943014eb7 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1306,9 +1306,9 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
*bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
if (bdi_bg_thresh)
- *bdi_bg_thresh = div_u64((u64)*bdi_thresh *
- background_thresh,
- dirty_thresh);
+ *bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
+ background_thresh,
+ dirty_thresh) : 0;
/*
* In order to avoid the stacked BDI deadlock we need
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ea758b898fd..ef44ad736ca1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2447,7 +2447,7 @@ static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
- const gfp_t wait = gfp_mask & __GFP_WAIT;
+ const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -2456,20 +2456,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
* The caller may dip into page reserves a bit more if the caller
* cannot run direct reclaim, or if the caller has realtime scheduling
* policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
- * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+ * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
*/
alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
- if (!wait) {
+ if (atomic) {
/*
- * Not worth trying to allocate harder for
- * __GFP_NOMEMALLOC even if it can't schedule.
+ * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
+ * if it can't schedule.
*/
- if (!(gfp_mask & __GFP_NOMEMALLOC))
+ if (!(gfp_mask & __GFP_NOMEMALLOC))
alloc_flags |= ALLOC_HARDER;
/*
- * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
- * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+ * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
+ * comment for __cpuset_node_allowed_softwall().
*/
alloc_flags &= ~ALLOC_CPUSET;
} else if (unlikely(rt_task(current)) && !in_interrupt())
@@ -6062,11 +6062,13 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
}
/**
- * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
+ * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
* @page: The page within the block of interest
- * @start_bitidx: The first bit of interest to retrieve
- * @end_bitidx: The last bit of interest
- * returns pageblock_bits flags
+ * @pfn: The target page frame number
+ * @end_bitidx: The last bit of interest to retrieve
+ * @mask: mask of bits that the caller is interested in
+ *
+ * Return: pageblock_bits flags
*/
unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
unsigned long end_bitidx,
@@ -6091,9 +6093,10 @@ unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
/**
* set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
* @page: The page within the block of interest
- * @start_bitidx: The first bit of interest
- * @end_bitidx: The last bit of interest
* @flags: The flags to set
+ * @pfn: The target page frame number
+ * @end_bitidx: The last bit of interest
+ * @mask: mask of bits that the caller is interested in
*/
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
unsigned long pfn,
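The corrected parameter lists above raise the obvious usage question: @end_bitidx names the last bit of the field and @mask selects all of its bits. A plausible caller shape follows; the macro names track the kernel's pageblock-flags convention and are an assumption about callers, not part of this hunk:

	/* Fetch the migrate-type field of the pageblock containing @page. */
	int mt = get_pfnblock_flags_mask(page, page_to_pfn(page),
					 PB_migrate_end, MIGRATETYPE_MASK);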
diff --git a/mm/rmap.c b/mm/rmap.c
index b7e94ebbd09e..22a4a7699cdb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
- if (unlikely(is_vm_hugetlb_page(vma)))
- pgoff = page->index << huge_page_order(page_hstate(page));
-
+ pgoff_t pgoff = page_to_pgoff(page);
return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}
@@ -1639,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma;
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ pgoff_t pgoff = page_to_pgoff(page);
struct anon_vma_chain *avc;
int ret = SWAP_AGAIN;
@@ -1680,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
{
struct address_space *mapping = page->mapping;
- pgoff_t pgoff = page->index << compound_order(page);
+ pgoff_t pgoff = page_to_pgoff(page);
struct vm_area_struct *vma;
int ret = SWAP_AGAIN;
diff --git a/mm/shmem.c b/mm/shmem.c
index 1140f49b6ded..af68b15a8fc1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
* a time): we would prefer not to enlarge the shmem inode just for that.
*/
struct shmem_falloc {
- int mode; /* FALLOC_FL mode currently operating */
+ wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
pgoff_t start; /* start of range currently being fallocated */
pgoff_t next; /* the next page offset to be fallocated */
pgoff_t nr_falloced; /* how many new pages have been fallocated */
@@ -468,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
return;
index = start;
- for ( ; ; ) {
+ while (index < end) {
cond_resched();
pvec.nr = find_get_entries(mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
pvec.pages, indices);
if (!pvec.nr) {
- if (index == start || unfalloc)
+ /* If all gone or hole-punch or unfalloc, we're done */
+ if (index == start || end != -1)
break;
+ /* But if truncating, restart to make sure all gone */
index = start;
continue;
}
- if ((index == start || unfalloc) && indices[0] >= end) {
- pagevec_remove_exceptionals(&pvec);
- pagevec_release(&pvec);
- break;
- }
mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
if (radix_tree_exceptional_entry(page)) {
if (unfalloc)
continue;
- nr_swaps_freed += !shmem_free_swap(mapping,
- index, page);
+ if (shmem_free_swap(mapping, index, page)) {
+ /* Swap was replaced by page: retry */
+ index--;
+ break;
+ }
+ nr_swaps_freed++;
continue;
}
@@ -506,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
if (page->mapping == mapping) {
VM_BUG_ON_PAGE(PageWriteback(page), page);
truncate_inode_page(mapping, page);
+ } else {
+ /* Page was replaced by swap: retry */
+ unlock_page(page);
+ index--;
+ break;
}
}
unlock_page(page);
@@ -760,7 +766,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
spin_lock(&inode->i_lock);
shmem_falloc = inode->i_private;
if (shmem_falloc &&
- !shmem_falloc->mode &&
+ !shmem_falloc->waitq &&
index >= shmem_falloc->start &&
index < shmem_falloc->next)
shmem_falloc->nr_unswapped++;
@@ -1248,38 +1254,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* Trinity finds that probing a hole which tmpfs is punching can
* prevent the hole-punch from ever completing: which in turn
* locks writers out with its hold on i_mutex. So refrain from
- * faulting pages into the hole while it's being punched, and
- * wait on i_mutex to be released if vmf->flags permits.
+ * faulting pages into the hole while it's being punched. Although
+ * shmem_undo_range() does remove the additions, it may be unable to
+ * keep up, as each new page needs its own unmap_mapping_range() call,
+ * and the i_mmap tree grows ever slower to scan if new vmas are added.
+ *
+ * It does not matter if we sometimes reach this check just before the
+ * hole-punch begins, so that one fault then races with the punch:
+ * we just need to make racing faults a rare case.
+ *
+ * The implementation below would be much simpler if we just used a
+ * standard mutex or completion: but we cannot take i_mutex in fault,
+ * and bloating every shmem inode for this unlikely case would be sad.
*/
if (unlikely(inode->i_private)) {
struct shmem_falloc *shmem_falloc;
spin_lock(&inode->i_lock);
shmem_falloc = inode->i_private;
- if (!shmem_falloc ||
- shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
- vmf->pgoff < shmem_falloc->start ||
- vmf->pgoff >= shmem_falloc->next)
- shmem_falloc = NULL;
- spin_unlock(&inode->i_lock);
- /*
- * i_lock has protected us from taking shmem_falloc seriously
- * once return from shmem_fallocate() went back up that stack.
- * i_lock does not serialize with i_mutex at all, but it does
- * not matter if sometimes we wait unnecessarily, or sometimes
- * miss out on waiting: we just need to make those cases rare.
- */
- if (shmem_falloc) {
+ if (shmem_falloc &&
+ shmem_falloc->waitq &&
+ vmf->pgoff >= shmem_falloc->start &&
+ vmf->pgoff < shmem_falloc->next) {
+ wait_queue_head_t *shmem_falloc_waitq;
+ DEFINE_WAIT(shmem_fault_wait);
+
+ ret = VM_FAULT_NOPAGE;
if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /* It's polite to up mmap_sem if we can */
up_read(&vma->vm_mm->mmap_sem);
- mutex_lock(&inode->i_mutex);
- mutex_unlock(&inode->i_mutex);
- return VM_FAULT_RETRY;
+ ret = VM_FAULT_RETRY;
}
- /* cond_resched? Leave that to GUP or return to user */
- return VM_FAULT_NOPAGE;
+
+ shmem_falloc_waitq = shmem_falloc->waitq;
+ prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock(&inode->i_lock);
+ schedule();
+
+ /*
+ * shmem_falloc_waitq points into the shmem_fallocate()
+ * stack of the hole-punching task: shmem_falloc_waitq
+ * is usually invalid by the time we reach here, but
+ * finish_wait() does not dereference it in that case;
+ * though i_lock is needed lest we race with wake_up_all().
+ */
+ spin_lock(&inode->i_lock);
+ finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+ spin_unlock(&inode->i_lock);
+ return ret;
}
+ spin_unlock(&inode->i_lock);
}
error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1774,13 +1800,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
mutex_lock(&inode->i_mutex);
- shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
-
if (mode & FALLOC_FL_PUNCH_HOLE) {
struct address_space *mapping = file->f_mapping;
loff_t unmap_start = round_up(offset, PAGE_SIZE);
loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+ shmem_falloc.waitq = &shmem_falloc_waitq;
shmem_falloc.start = unmap_start >> PAGE_SHIFT;
shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
spin_lock(&inode->i_lock);
@@ -1792,8 +1818,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1 + unmap_end - unmap_start, 0);
shmem_truncate_range(inode, offset, offset + len - 1);
/* No need to unmap again: hole-punching leaves COWed pages */
+
+ spin_lock(&inode->i_lock);
+ inode->i_private = NULL;
+ wake_up_all(&shmem_falloc_waitq);
+ spin_unlock(&inode->i_lock);
error = 0;
- goto undone;
+ goto out;
}
/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1809,6 +1840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
goto out;
}
+ shmem_falloc.waitq = NULL;
shmem_falloc.start = start;
shmem_falloc.next = start;
shmem_falloc.nr_falloced = 0;
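The shmem changes above rely on a delicate on-stack wait queue handshake: the hole-puncher owns the waitq's lifetime, publishes it through inode->i_private, and must wake all faulters under i_lock before its stack frame disappears. A distilled sketch of the two sides in kernel-style C; the function and struct names here are hypothetical, only the wait/wake primitives are real:

	/* Punching side: waitq lives on this stack frame. */
	static void punch_side(struct inode *inode, struct falloc_state *st)
	{
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

		st->waitq = &waitq;
		spin_lock(&inode->i_lock);
		inode->i_private = st;			/* publish */
		spin_unlock(&inode->i_lock);

		/* ... unmap and truncate the hole ... */

		spin_lock(&inode->i_lock);
		inode->i_private = NULL;		/* unpublish, then wake */
		wake_up_all(&waitq);
		spin_unlock(&inode->i_lock);
	}						/* waitq may die only now */

	/* Faulting side: touches waitq only while i_lock pins the publisher. */
	static void fault_side(struct inode *inode)
	{
		DEFINE_WAIT(wait);
		struct falloc_state *st;
		wait_queue_head_t *waitq;

		spin_lock(&inode->i_lock);
		st = inode->i_private;
		if (!st || !st->waitq) {
			spin_unlock(&inode->i_lock);
			return;
		}
		waitq = st->waitq;
		prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&inode->i_lock);
		schedule();				/* until wake_up_all() */

		spin_lock(&inode->i_lock);		/* lest we race the wake */
		finish_wait(waitq, &wait);
		spin_unlock(&inode->i_lock);
	}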
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 735e01a0db6f..d31c4bacc6a2 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -55,7 +55,7 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
continue;
}
-#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
+#if !defined(CONFIG_SLUB)
if (!strcmp(s->name, name)) {
pr_err("%s (%s): Cache name already exists.\n",
__func__, name);
diff --git a/mm/truncate.c b/mm/truncate.c
index 6a78c814bebf..eda247307164 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -355,14 +355,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
for ( ; ; ) {
cond_resched();
if (!pagevec_lookup_entries(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE),
- indices)) {
+ min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
+ /* If all gone from start onwards, we're done */
if (index == start)
break;
+ /* Otherwise restart to make sure all gone */
index = start;
continue;
}
if (index == start && indices[0] >= end) {
+ /* All gone out of hole to be punched, we're done */
pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
break;
@@ -373,8 +375,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
/* We rely upon deletion not changing page->index */
index = indices[i];
- if (index >= end)
+ if (index >= end) {
+ /* Restart punch to make sure all gone */
+ index = start - 1;
break;
+ }
if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ad2ac3c00398..dd11f612e03e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -627,8 +627,6 @@ static void vlan_dev_uninit(struct net_device *dev)
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
int i;
- free_percpu(vlan->vlan_pcpu_stats);
- vlan->vlan_pcpu_stats = NULL;
for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
while ((pm = vlan->egress_priority_map[i]) != NULL) {
vlan->egress_priority_map[i] = pm->next;
@@ -785,6 +783,15 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
};
+static void vlan_dev_free(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+ free_percpu(vlan->vlan_pcpu_stats);
+ vlan->vlan_pcpu_stats = NULL;
+ free_netdev(dev);
+}
+
void vlan_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -794,7 +801,7 @@ void vlan_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->netdev_ops = &vlan_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = vlan_dev_free;
dev->ethtool_ops = &vlan_ethtool_ops;
memset(dev->broadcast, 0, ETH_ALEN);
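The pattern here generalizes: per-device resources that readers may still touch while the device is registered should be freed from a custom destructor, which runs only after the last netdev reference is dropped, rather than from ndo_uninit. A minimal sketch for a hypothetical driver; the foo_* names are illustrative:

	static void foo_dev_free(struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		free_percpu(priv->pcpu_stats);	/* no readers remain */
		free_netdev(dev);		/* must come last */
	}

	static void foo_setup(struct net_device *dev)
	{
		/* ... other setup ... */
		dev->destructor = foo_dev_free;	/* not plain free_netdev */
	}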
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 01a1082e02b3..bfcf6be1d665 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
goto drop;
/* Queue packet (standard) */
- skb->sk = sock;
-
if (sock_queue_rcv_skb(sock, skb) < 0)
goto drop;
@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
if (!skb)
goto out;
- skb->sk = sk;
skb_reserve(skb, ddp_dl->header_length);
skb_reserve(skb, dev->hard_header_len);
skb->dev = dev;
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 6f0d9ec37950..a957c8140721 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -800,11 +800,6 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
bla_dst_own = &bat_priv->bla.claim_dest;
- /* check if it is a claim packet in general */
- if (memcmp(bla_dst->magic, bla_dst_own->magic,
- sizeof(bla_dst->magic)) != 0)
- return 0;
-
/* if announcement packet, use the source,
* otherwise assume it is in the hw_src
*/
@@ -866,12 +861,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
struct sk_buff *skb)
{
- struct batadv_bla_claim_dst *bla_dst;
+ struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
uint8_t *hw_src, *hw_dst;
- struct vlan_ethhdr *vhdr;
+ struct vlan_hdr *vhdr, vhdr_buf;
struct ethhdr *ethhdr;
struct arphdr *arphdr;
unsigned short vid;
+ int vlan_depth = 0;
__be16 proto;
int headlen;
int ret;
@@ -882,9 +878,24 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
proto = ethhdr->h_proto;
headlen = ETH_HLEN;
if (vid & BATADV_VLAN_HAS_TAG) {
- vhdr = vlan_eth_hdr(skb);
- proto = vhdr->h_vlan_encapsulated_proto;
- headlen += VLAN_HLEN;
+ /* Traverse the VLAN/Ethertypes.
+ *
+ * At this point it is known that the first protocol is a VLAN
+ * header, so start checking at the encapsulated protocol.
+ *
+ * The depth of the VLAN headers is recorded to drop BLA claim
+ * frames encapsulated into multiple VLAN headers (QinQ).
+ */
+ do {
+ vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
+ &vhdr_buf);
+ if (!vhdr)
+ return 0;
+
+ proto = vhdr->h_vlan_encapsulated_proto;
+ headlen += VLAN_HLEN;
+ vlan_depth++;
+ } while (proto == htons(ETH_P_8021Q));
}
if (proto != htons(ETH_P_ARP))
@@ -914,6 +925,19 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
hw_dst = hw_src + ETH_ALEN + 4;
bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
+ bla_dst_own = &bat_priv->bla.claim_dest;
+
+ /* check if it is a claim frame in general */
+ if (memcmp(bla_dst->magic, bla_dst_own->magic,
+ sizeof(bla_dst->magic)) != 0)
+ return 0;
+
+ /* check if there is a claim frame encapsulated deeper in (QinQ) and
+ * drop that, as this is not supported by BLA but should also not be
+ * sent via the mesh.
+ */
+ if (vlan_depth > 1)
+ return 1;
/* check if it is a claim frame. */
ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
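The loop above leans on skb_header_pointer(), which is what makes the QinQ walk safe on non-linear skbs: it returns a pointer to the requested bytes, copying them into the caller's buffer when they are not in the linear area, and NULL when the packet is too short. One iteration in isolation, with skb and offset as assumed context:

	struct vlan_hdr buf;
	const struct vlan_hdr *vh;

	vh = skb_header_pointer(skb, offset, sizeof(buf), &buf);
	if (!vh)
		return 0;	/* truncated frame: cannot be a claim */
	proto = vh->h_vlan_encapsulated_proto;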
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index e7ee65dc20bf..cbd677f48c00 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -448,10 +448,15 @@ out:
* possibly free it
* @softif_vlan: the vlan object to release
*/
-void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan)
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
{
- if (atomic_dec_and_test(&softif_vlan->refcount))
- kfree_rcu(softif_vlan, rcu);
+ if (atomic_dec_and_test(&vlan->refcount)) {
+ spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+ hlist_del_rcu(&vlan->list);
+ spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+
+ kfree_rcu(vlan, rcu);
+ }
}
/**
@@ -505,6 +510,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
if (!vlan)
return -ENOMEM;
+ vlan->bat_priv = bat_priv;
vlan->vid = vid;
atomic_set(&vlan->refcount, 1);
@@ -516,6 +522,10 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
return err;
}
+ spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+ hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
/* add a new TT local entry. This one will be marked with the NOPURGE
* flag
*/
@@ -523,10 +533,6 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
bat_priv->soft_iface->dev_addr, vid,
BATADV_NULL_IFINDEX, BATADV_NO_MARK);
- spin_lock_bh(&bat_priv->softif_vlan_list_lock);
- hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
- spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
return 0;
}
@@ -538,18 +544,13 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
struct batadv_softif_vlan *vlan)
{
- spin_lock_bh(&bat_priv->softif_vlan_list_lock);
- hlist_del_rcu(&vlan->list);
- spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
- batadv_sysfs_del_vlan(bat_priv, vlan);
-
/* explicitly remove the associated TT local entry because it is marked
* with the NOPURGE flag
*/
batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
vlan->vid, "vlan interface destroyed", false);
+ batadv_sysfs_del_vlan(bat_priv, vlan);
batadv_softif_vlan_free_ref(vlan);
}
@@ -567,6 +568,8 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
unsigned short vid)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_softif_vlan *vlan;
+ int ret;
/* only 802.1Q vlans are supported.
* batman-adv does not know how to handle other types
@@ -576,7 +579,36 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
vid |= BATADV_VLAN_HAS_TAG;
- return batadv_softif_create_vlan(bat_priv, vid);
+ /* if a new vlan is getting created and it already exists, it means that
+ * it was not deleted yet. batadv_softif_vlan_get() increases the
+ * refcount in order to revive the object.
+ *
+ * if it does not exist then create it.
+ */
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (!vlan)
+ return batadv_softif_create_vlan(bat_priv, vid);
+
+ /* recreate the sysfs object if it was already destroyed (and it should
+ * be since we received a kill_vid() for this vlan)
+ */
+ if (!vlan->kobj) {
+ ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+ if (ret) {
+ batadv_softif_vlan_free_ref(vlan);
+ return ret;
+ }
+ }
+
+ /* add a new TT local entry. This one will be marked with the NOPURGE
+ * flag. This must be added again, even if the vlan object already
+ * exists, because the entry was deleted by kill_vid()
+ */
+ batadv_tt_local_add(bat_priv->soft_iface,
+ bat_priv->soft_iface->dev_addr, vid,
+ BATADV_NULL_IFINDEX, BATADV_NO_MARK);
+
+ return 0;
}
/**
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d636bde72c9a..5f59e7f899a0 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -511,6 +511,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct batadv_tt_local_entry *tt_local;
struct batadv_tt_global_entry *tt_global = NULL;
+ struct batadv_softif_vlan *vlan;
struct net_device *in_dev = NULL;
struct hlist_head *head;
struct batadv_tt_orig_list_entry *orig_entry;
@@ -572,6 +573,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
if (!tt_local)
goto out;
+ /* increase the refcounter of the related vlan */
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
addr, BATADV_PRINT_VID(vid),
@@ -604,6 +608,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
if (unlikely(hash_added != 0)) {
/* remove the reference for the hash */
batadv_tt_local_entry_free_ref(tt_local);
+ batadv_softif_vlan_free_ref(vlan);
goto out;
}
@@ -1009,6 +1014,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
{
struct batadv_tt_local_entry *tt_local_entry;
uint16_t flags, curr_flags = BATADV_NO_FLAGS;
+ struct batadv_softif_vlan *vlan;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
@@ -1039,6 +1045,11 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
hlist_del_rcu(&tt_local_entry->common.hash_entry);
batadv_tt_local_entry_free_ref(tt_local_entry);
+ /* decrease the reference held for this vlan */
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ batadv_softif_vlan_free_ref(vlan);
+ batadv_softif_vlan_free_ref(vlan);
+
out:
if (tt_local_entry)
batadv_tt_local_entry_free_ref(tt_local_entry);
@@ -1111,6 +1122,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
spinlock_t *list_lock; /* protects write access to the hash lists */
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_local_entry *tt_local;
+ struct batadv_softif_vlan *vlan;
struct hlist_node *node_tmp;
struct hlist_head *head;
uint32_t i;
@@ -1131,6 +1143,13 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
tt_local = container_of(tt_common_entry,
struct batadv_tt_local_entry,
common);
+
+ /* decrease the reference held for this vlan */
+ vlan = batadv_softif_vlan_get(bat_priv,
+ tt_common_entry->vid);
+ batadv_softif_vlan_free_ref(vlan);
+ batadv_softif_vlan_free_ref(vlan);
+
batadv_tt_local_entry_free_ref(tt_local);
}
spin_unlock_bh(list_lock);
@@ -3139,6 +3158,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_local_entry *tt_local;
+ struct batadv_softif_vlan *vlan;
struct hlist_node *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3167,6 +3187,12 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
tt_local = container_of(tt_common,
struct batadv_tt_local_entry,
common);
+
+ /* decrease the reference held for this vlan */
+ vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
+ batadv_softif_vlan_free_ref(vlan);
+ batadv_softif_vlan_free_ref(vlan);
+
batadv_tt_local_entry_free_ref(tt_local);
}
spin_unlock_bh(list_lock);
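The back-to-back free_ref calls above are easy to misread; the pairing works out as sketched below (annotation only, not additional code to apply):

	vlan = batadv_softif_vlan_get(bat_priv, vid);	/* +1: this lookup */
	batadv_softif_vlan_free_ref(vlan);		/* -1: the lookup */
	batadv_softif_vlan_free_ref(vlan);		/* -1: the reference the
							 * TT local entry took
							 * in batadv_tt_local_add()
							 */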
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 34891a56773f..8854c05622a9 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -687,6 +687,7 @@ struct batadv_priv_nc {
/**
* struct batadv_softif_vlan - per VLAN attributes set
+ * @bat_priv: pointer to the mesh object
* @vid: VLAN identifier
* @kobj: kobject for sysfs vlan subdirectory
* @ap_isolation: AP isolation state
@@ -696,6 +697,7 @@ struct batadv_priv_nc {
* @rcu: struct used for freeing in a RCU-safe manner
*/
struct batadv_softif_vlan {
+ struct batadv_priv *bat_priv;
unsigned short vid;
struct kobject *kobj;
atomic_t ap_isolation; /* boolean */
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ca01d1861854..a7a27bc2c0b1 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -289,10 +289,20 @@ static void hci_conn_timeout(struct work_struct *work)
{
struct hci_conn *conn = container_of(work, struct hci_conn,
disc_work.work);
+ int refcnt = atomic_read(&conn->refcnt);
BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
- if (atomic_read(&conn->refcnt))
+ WARN_ON(refcnt < 0);
+
+ /* FIXME: It was observed that in a pairing-failed scenario, refcnt
+ * drops below 0. Probably this is because l2cap_conn_del calls
+ * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
+ * dropped. After that loop hci_chan_del is called which also drops
+ * conn. For now make sure that ACL is alive if refcnt is higher than 0,
+ * otherwise drop it.
+ */
+ if (refcnt > 0)
return;
switch (conn->state) {
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index f2829a7932e2..e33a982161c1 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -385,6 +385,16 @@ static const u8 gen_method[5][5] = {
{ CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
};
+static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
+{
+ /* If either side has unknown io_caps, use JUST WORKS */
+ if (local_io > SMP_IO_KEYBOARD_DISPLAY ||
+ remote_io > SMP_IO_KEYBOARD_DISPLAY)
+ return JUST_WORKS;
+
+ return gen_method[remote_io][local_io];
+}
+
static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
u8 local_io, u8 remote_io)
{
@@ -401,14 +411,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
/* If neither side wants MITM, use JUST WORKS */
- /* If either side has unknown io_caps, use JUST WORKS */
/* Otherwise, look up method from the table */
- if (!(auth & SMP_AUTH_MITM) ||
- local_io > SMP_IO_KEYBOARD_DISPLAY ||
- remote_io > SMP_IO_KEYBOARD_DISPLAY)
+ if (!(auth & SMP_AUTH_MITM))
method = JUST_WORKS;
else
- method = gen_method[remote_io][local_io];
+ method = get_auth_method(smp, local_io, remote_io);
/* If not bonding, don't ask user to confirm a Zero TK */
if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
@@ -669,7 +676,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_pairing rsp, *req = (void *) skb->data;
struct smp_chan *smp;
- u8 key_size, auth;
+ u8 key_size, auth, sec_level;
int ret;
BT_DBG("conn %p", conn);
@@ -695,7 +702,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
/* We didn't start the pairing, so match remote */
auth = req->auth_req;
- conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
+ sec_level = authreq_to_seclevel(auth);
+ if (sec_level > conn->hcon->pending_sec_level)
+ conn->hcon->pending_sec_level = sec_level;
+
+ /* If we need MITM, check that it can be achieved */
+ if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
+ u8 method;
+
+ method = get_auth_method(smp, conn->hcon->io_capability,
+ req->io_capability);
+ if (method == JUST_WORKS || method == JUST_CFM)
+ return SMP_AUTH_REQUIREMENTS;
+ }
build_pairing_cmd(conn, req, &rsp, auth);
@@ -743,6 +762,16 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
if (check_enc_key_size(conn, key_size))
return SMP_ENC_KEY_SIZE;
+ /* If we need MITM, check that it can be achieved */
+ if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
+ u8 method;
+
+ method = get_auth_method(smp, req->io_capability,
+ rsp->io_capability);
+ if (method == JUST_WORKS || method == JUST_CFM)
+ return SMP_AUTH_REQUIREMENTS;
+ }
+
get_random_bytes(smp->prnd, sizeof(smp->prnd));
smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -838,6 +867,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_cmd_pairing cp;
struct hci_conn *hcon = conn->hcon;
struct smp_chan *smp;
+ u8 sec_level;
BT_DBG("conn %p", conn);
@@ -847,7 +877,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (!(conn->hcon->link_mode & HCI_LM_MASTER))
return SMP_CMD_NOTSUPP;
- hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
+ sec_level = authreq_to_seclevel(rp->auth_req);
+ if (sec_level > hcon->pending_sec_level)
+ hcon->pending_sec_level = sec_level;
if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
return 0;
@@ -901,9 +933,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
if (smp_sufficient_security(hcon, sec_level))
return 1;
+ if (sec_level > hcon->pending_sec_level)
+ hcon->pending_sec_level = sec_level;
+
if (hcon->link_mode & HCI_LM_MASTER)
- if (smp_ltk_encrypt(conn, sec_level))
- goto done;
+ if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
+ return 0;
if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
return 0;
@@ -918,7 +953,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
* requires it.
*/
if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
- sec_level > BT_SECURITY_MEDIUM)
+ hcon->pending_sec_level > BT_SECURITY_MEDIUM)
authreq |= SMP_AUTH_MITM;
if (hcon->link_mode & HCI_LM_MASTER) {
@@ -937,9 +972,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
set_bit(SMP_FLAG_INITIATOR, &smp->flags);
-done:
- hcon->pending_sec_level = sec_level;
-
return 0;
}
diff --git a/net/compat.c b/net/compat.c
index 9a76eaf63184..bc8aeefddf3f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
{
int tot_len;
- if (kern_msg->msg_namelen) {
+ if (kern_msg->msg_name && kern_msg->msg_namelen) {
if (mode == VERIFY_READ) {
int err = move_addr_to_kernel(kern_msg->msg_name,
kern_msg->msg_namelen,
@@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
if (err < 0)
return err;
}
- if (kern_msg->msg_name)
- kern_msg->msg_name = kern_address;
- } else
+ kern_msg->msg_name = kern_address;
+ } else {
kern_msg->msg_name = NULL;
+ kern_msg->msg_namelen = 0;
+ }
tot_len = iov_from_user_compat_to_kern(kern_iov,
(struct compat_iovec __user *)kern_msg->msg_iov,
diff --git a/net/core/dev.c b/net/core/dev.c
index 30eedf677913..367a586d0c8a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly; /* Taps */
static struct list_head offload_base __read_mostly;
static int netif_rx_internal(struct sk_buff *skb);
+static int call_netdevice_notifiers_info(unsigned long val,
+ struct net_device *dev,
+ struct netdev_notifier_info *info);
/*
* The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1207,7 +1210,11 @@ EXPORT_SYMBOL(netdev_features_change);
void netdev_state_change(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
- call_netdevice_notifiers(NETDEV_CHANGE, dev);
+ struct netdev_notifier_change_info change_info;
+
+ change_info.flags_changed = 0;
+ call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
+ &change_info.info);
rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
}
}
@@ -4089,6 +4096,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->vlan_tci = 0;
skb->dev = napi->dev;
skb->skb_iif = 0;
+ skb->encapsulation = 0;
+ skb_shinfo(skb)->gso_type = 0;
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
napi->skb = skb;
@@ -4227,9 +4236,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
#endif
napi->weight = weight_p;
local_irq_disable();
- while (work < quota) {
+ while (1) {
struct sk_buff *skb;
- unsigned int qlen;
while ((skb = __skb_dequeue(&sd->process_queue))) {
local_irq_enable();
@@ -4243,24 +4251,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
}
rps_lock(sd);
- qlen = skb_queue_len(&sd->input_pkt_queue);
- if (qlen)
- skb_queue_splice_tail_init(&sd->input_pkt_queue,
- &sd->process_queue);
-
- if (qlen < quota - work) {
+ if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
* Inline a custom version of __napi_complete().
* only current cpu owns and manipulates this napi,
- * and NAPI_STATE_SCHED is the only possible flag set on backlog.
- * we can use a plain write instead of clear_bit(),
+ * and NAPI_STATE_SCHED is the only possible flag set
+ * on backlog.
+ * We can use a plain write instead of clear_bit(),
 * and we don't need an smp_mb() memory barrier.
*/
list_del(&napi->poll_list);
napi->state = 0;
+ rps_unlock(sd);
- quota = work + qlen;
+ break;
}
+
+ skb_queue_splice_tail_init(&sd->input_pkt_queue,
+ &sd->process_queue);
rps_unlock(sd);
}
local_irq_enable();
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 827dd6beb49c..e1ec45ab1e63 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
{
int size, ct, err;
- if (m->msg_namelen) {
+ if (m->msg_name && m->msg_namelen) {
if (mode == VERIFY_READ) {
void __user *namep;
namep = (void __user __force *) m->msg_name;
@@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
if (err < 0)
return err;
}
- if (m->msg_name)
- m->msg_name = address;
+ m->msg_name = address;
} else {
m->msg_name = NULL;
+ m->msg_namelen = 0;
}
size = m->msg_iovlen * sizeof(struct iovec);
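This hunk and the net/compat.c one above enforce the same post-condition, so later code can trust either field alone; expressed as assertions (a sketch, not proposed kernel code):

	/* After verify_iovec()/verify_compat_iovec() succeed: */
	BUG_ON(m->msg_name == NULL && m->msg_namelen != 0);
	BUG_ON(m->msg_name != NULL && m->msg_namelen == 0);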
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 32d872eec7f5..ef31fef25e5a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2249,7 +2249,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
ndm->ndm_pad1 = 0;
ndm->ndm_pad2 = 0;
ndm->ndm_flags = pn->flags | NTF_PROXY;
- ndm->ndm_type = NDA_DST;
+ ndm->ndm_type = RTN_UNICAST;
ndm->ndm_ifindex = pn->dev->ifindex;
ndm->ndm_state = NUD_NONE;
@@ -3059,11 +3059,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
} else {
+ struct neigh_table *tbl = p->tbl;
dev_name_source = "default";
- t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
- t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
- t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
- t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
+ t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
+ t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
+ t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
+ t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
}
if (handler) {
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 9acec61f5433..dd8696a3dbec 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -150,7 +150,7 @@ int dns_query(const char *type, const char *name, size_t namelen,
goto put;
memcpy(*_result, upayload->data, len);
- *_result[len] = '\0';
+ (*_result)[len] = '\0';
if (_expiry)
*_expiry = rkey->expiry;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d5e6836cf772..d156b3c5f363 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1429,6 +1429,9 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff)
int proto = iph->protocol;
int err = -ENOSYS;
+ if (skb->encapsulation)
+ skb_set_inner_network_header(skb, nhoff);
+
csum_replace2(&iph->check, iph->tot_len, newlen);
iph->tot_len = newlen;
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 4e9619bca732..0485bf7f8f03 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -68,6 +68,7 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
skb_push(skb, hdr_len);
+ skb_reset_transport_header(skb);
greh = (struct gre_base_hdr *)skb->data;
greh->flags = tnl_flags_to_gre_flags(tpi->flags);
greh->protocol = tpi->proto;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index eb92deb12666..f0bdd47bbbcb 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -263,6 +263,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
int err = -ENOENT;
__be16 type;
+ skb->encapsulation = 1;
+ skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
+
type = greh->protocol;
if (greh->flags & GRE_KEY)
grehlen += GRE_HEADER_SECTION;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 79c3d947a481..42b7bcf8045b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -739,8 +739,6 @@ static void icmp_unreach(struct sk_buff *skb)
/* fall through */
case 0:
info = ntohs(icmph->un.frag.mtu);
- if (!info)
- goto out;
}
break;
case ICMP_SR_FAILED:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6748d420f714..db710b059bab 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1944,6 +1944,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
rtnl_lock();
in_dev = ip_mc_find_dev(net, imr);
+ if (!in_dev) {
+ ret = -ENODEV;
+ goto out;
+ }
ifindex = imr->imr_ifindex;
for (imlp = &inet->mc_list;
(iml = rtnl_dereference(*imlp)) != NULL;
@@ -1961,16 +1965,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
*imlp = iml->next_rcu;
- if (in_dev)
- ip_mc_dec_group(in_dev, group);
+ ip_mc_dec_group(in_dev, group);
rtnl_unlock();
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
kfree_rcu(iml, rcu);
return 0;
}
- if (!in_dev)
- ret = -ENODEV;
+out:
rtnl_unlock();
return ret;
}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 5e7aecea05cd..ad382499bace 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
optptr++;
continue;
}
+ if (unlikely(l < 2)) {
+ pp_ptr = optptr;
+ goto error;
+ }
optlen = optptr[1];
if (optlen < 2 || optlen > l) {
pp_ptr = optptr;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 54b6731dab55..6f9de61dce5f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -169,6 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
hlist_for_each_entry_rcu(t, head, hash_node) {
if (remote != t->parms.iph.daddr ||
+ t->parms.iph.saddr != 0 ||
!(t->dev->flags & IFF_UP))
continue;
@@ -185,10 +186,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
head = &itn->tunnels[hash];
hlist_for_each_entry_rcu(t, head, hash_node) {
- if ((local != t->parms.iph.saddr &&
- (local != t->parms.iph.daddr ||
- !ipv4_is_multicast(local))) ||
- !(t->dev->flags & IFF_UP))
+ if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
+ (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
+ continue;
+
+ if (!(t->dev->flags & IFF_UP))
continue;
if (!ip_tunnel_key_match(&t->parms, flags, key))
@@ -205,6 +207,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
hlist_for_each_entry_rcu(t, head, hash_node) {
if (t->parms.i_key != key ||
+ t->parms.iph.saddr != 0 ||
+ t->parms.iph.daddr != 0 ||
!(t->dev->flags & IFF_UP))
continue;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 082239ffe34a..190199851c9a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -457,8 +457,31 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
return neigh_create(&arp_tbl, pkey, dev);
}
-atomic_t *ip_idents __read_mostly;
-EXPORT_SYMBOL(ip_idents);
+#define IP_IDENTS_SZ 2048u
+struct ip_ident_bucket {
+ atomic_t id;
+ u32 stamp32;
+};
+
+static struct ip_ident_bucket *ip_idents __read_mostly;
+
+/* In order to protect privacy, we add a perturbation to identifiers
+ * if one generator is seldom used. This makes it hard for an attacker
+ * to infer how many packets were sent between two points in time.
+ */
+u32 ip_idents_reserve(u32 hash, int segs)
+{
+ struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
+ u32 old = ACCESS_ONCE(bucket->stamp32);
+ u32 now = (u32)jiffies;
+ u32 delta = 0;
+
+ if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
+ delta = prandom_u32_max(now - old);
+
+ return atomic_add_return(segs + delta, &bucket->id) - segs;
+}
+EXPORT_SYMBOL(ip_idents_reserve);
void __ip_select_ident(struct iphdr *iph, int segs)
{
@@ -467,7 +490,10 @@ void __ip_select_ident(struct iphdr *iph, int segs)
net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
- hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd);
+ hash = jhash_3words((__force u32)iph->daddr,
+ (__force u32)iph->saddr,
+ iph->protocol,
+ ip_idents_hashrnd);
id = ip_idents_reserve(hash, segs);
iph->id = htons(id);
}
@@ -1010,7 +1036,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct flowi4 fl4;
struct rtable *rt;
- struct dst_entry *dst;
+ struct dst_entry *odst = NULL;
bool new = false;
bh_lock_sock(sk);
@@ -1018,16 +1044,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
if (!ip_sk_accept_pmtu(sk))
goto out;
- rt = (struct rtable *) __sk_dst_get(sk);
+ odst = sk_dst_get(sk);
- if (sock_owned_by_user(sk) || !rt) {
+ if (sock_owned_by_user(sk) || !odst) {
__ipv4_sk_update_pmtu(skb, sk, mtu);
goto out;
}
__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
- if (!__sk_dst_check(sk, 0)) {
+ rt = (struct rtable *)odst;
+ if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt))
goto out;
@@ -1037,8 +1064,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
- dst = dst_check(&rt->dst, 0);
- if (!dst) {
+ if (!dst_check(&rt->dst, 0)) {
if (new)
dst_release(&rt->dst);
@@ -1050,10 +1076,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
}
if (new)
- __sk_dst_set(sk, &rt->dst);
+ sk_dst_set(sk, &rt->dst);
out:
bh_unlock_sock(sk);
+ dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
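The bucketed, time-perturbed ID generator introduced in this file is worth seeing end to end. A standalone userspace C model follows; time() stands in for jiffies and rand() for prandom_u32_max(), so the numbers differ from the kernel's but the reservation logic is the same:

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	#define IP_IDENTS_SZ 2048u

	struct ip_ident_bucket {
		atomic_uint id;
		atomic_uint stamp32;
	};

	static struct ip_ident_bucket idents[IP_IDENTS_SZ];

	static unsigned int ip_idents_reserve(unsigned int hash, unsigned int segs)
	{
		struct ip_ident_bucket *b = &idents[hash % IP_IDENTS_SZ];
		unsigned int old = atomic_load(&b->stamp32);
		unsigned int now = (unsigned int)time(NULL);
		unsigned int delta = 0;

		/* Idle bucket: add a random jump so an observer cannot count
		 * the packets sent between two probes.
		 */
		if (old != now &&
		    atomic_compare_exchange_strong(&b->stamp32, &old, now))
			delta = (unsigned int)rand() % (now - old);

		/* Reserve segs ids starting right after the random jump;
		 * fetch_add returns the pre-add value, so add delta back.
		 */
		return atomic_fetch_add(&b->id, segs + delta) + delta;
	}

	int main(void)
	{
		srand((unsigned int)time(NULL));
		printf("first id: %u\n", ip_idents_reserve(12345u, 1));
		return 0;
	}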
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index eb1dde37e678..9d2118e5fbc7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (unlikely(tp->repair)) {
if (tp->repair_queue == TCP_RECV_QUEUE) {
copied = tcp_send_rcvq(sk, msg, size);
- goto out;
+ goto out_nopush;
}
err = -EINVAL;
@@ -1282,6 +1282,7 @@ wait_for_memory:
out:
if (copied)
tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
+out_nopush:
release_sock(sk);
return copied + copied_syn;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b5c23756965a..40639c288dc2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1106,7 +1106,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
}
/* D-SACK for already forgotten data... Do dumb counting. */
- if (dup_sack && tp->undo_marker && tp->undo_retrans &&
+ if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
!after(end_seq_0, prior_snd_una) &&
after(end_seq_0, tp->undo_marker))
tp->undo_retrans--;
@@ -1187,7 +1187,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
/* Account D-SACK for retransmitted packet. */
if (dup_sack && (sacked & TCPCB_RETRANS)) {
- if (tp->undo_marker && tp->undo_retrans &&
+ if (tp->undo_marker && tp->undo_retrans > 0 &&
after(end_seq, tp->undo_marker))
tp->undo_retrans--;
if (sacked & TCPCB_SACKED_ACKED)
@@ -1893,7 +1893,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
tp->lost_out = 0;
tp->undo_marker = 0;
- tp->undo_retrans = 0;
+ tp->undo_retrans = -1;
}
void tcp_clear_retrans(struct tcp_sock *tp)
@@ -2665,7 +2665,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
tp->prior_ssthresh = 0;
tp->undo_marker = tp->snd_una;
- tp->undo_retrans = tp->retrans_out;
+ tp->undo_retrans = tp->retrans_out ? : -1;
if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
if (!ece_ack)
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 4e86c59ec7f7..55046ecd083e 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -309,7 +309,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
iph->daddr, 0);
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
return tcp_gro_complete(skb);
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d92bce0ea24e..179b51e6bda3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2525,8 +2525,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if (!tp->retrans_stamp)
tp->retrans_stamp = TCP_SKB_CB(skb)->when;
- tp->undo_retrans += tcp_skb_pcount(skb);
-
/* snd_nxt is stored to detect loss of retransmitted segment,
* see tcp_input.c tcp_sacktag_write_queue().
*/
@@ -2534,6 +2532,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
} else if (err != -EBUSY) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
}
+
+ if (tp->undo_retrans < 0)
+ tp->undo_retrans = 0;
+ tp->undo_retrans += tcp_skb_pcount(skb);
return err;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d92f94b7e402..7d5a8661df76 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1588,8 +1588,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
goto csum_error;
- if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+ if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+ is_udplite);
goto drop;
+ }
rc = 0;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cb9df0eb4023..45702b8cd141 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -545,6 +545,8 @@ static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
+ hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
+
id = ip_idents_reserve(hash, 1);
fhdr->identification = htonl(id);
}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 08b367c6b9cf..617f0958e164 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1301,8 +1301,17 @@ int igmp6_event_query(struct sk_buff *skb)
len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
len -= skb_network_header_len(skb);
- /* Drop queries with not link local source */
- if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
+ /* RFC3810 6.2
+ * Upon reception of an MLD message that contains a Query, the node
+ * checks if the source address of the message is a valid link-local
+ * address, if the Hop Limit is set to 1, and if the Router Alert
+ * option is present in the Hop-By-Hop Options header of the IPv6
+ * packet. If any of these checks fails, the packet is dropped.
+ */
+ if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
+ ipv6_hdr(skb)->hop_limit != 1 ||
+ !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
+ IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
return -EINVAL;
idev = __in6_dev_get(skb->dev);
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 8517d3cd1aed..01b0ff9a0c2c 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
&iph->daddr, 0);
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
return tcp_gro_complete(skb);
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 95c834799288..7092ff78fd84 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -674,8 +674,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
goto csum_error;
}
- if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+ if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+ UDP6_INC_STATS_BH(sock_net(sk),
+ UDP_MIB_RCVBUFERRORS, is_udplite);
goto drop;
+ }
skb_dst_drop(skb);
@@ -690,6 +693,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
bh_unlock_sock(sk);
return rc;
+
csum_error:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 950909f04ee6..13752d96275e 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1365,7 +1365,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
int err;
if (level != SOL_PPPOL2TP)
- return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+ return -EINVAL;
if (optlen < sizeof(int))
return -EINVAL;
@@ -1491,7 +1491,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
struct pppol2tp_session *ps;
if (level != SOL_PPPOL2TP)
- return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+ return -EINVAL;
if (get_user(len, optlen))
return -EFAULT;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d7513a503be1..592f4b152ba8 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -472,12 +472,15 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
- struct rate_control_ref *ref = local->rate_ctrl;
+ struct rate_control_ref *ref = NULL;
struct timespec uptime;
u64 packets = 0;
u32 thr = 0;
int i, ac;
+ if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
+ ref = local->rate_ctrl;
+
sinfo->generation = sdata->local->sta_generation;
sinfo->filled = STATION_INFO_INACTIVE_TIME |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5214686d9fd1..1a252c606ad0 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -414,6 +414,9 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
if (ieee80211_has_order(hdr->frame_control))
return TX_CONTINUE;
+ if (ieee80211_is_probe_req(hdr->frame_control))
+ return TX_CONTINUE;
+
if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
info->hw_queue = tx->sdata->vif.cab_queue;
@@ -463,6 +466,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
{
struct sta_info *sta = tx->sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ieee80211_local *local = tx->local;
if (unlikely(!sta))
@@ -473,6 +477,12 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
!(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
int ac = skb_get_queue_mapping(tx->skb);
+ if (ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
+ info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+ return TX_CONTINUE;
+ }
+
ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
sta->sta.addr, sta->sta.aid, ac);
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -531,19 +541,9 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
-
if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
return TX_CONTINUE;
- if (ieee80211_is_mgmt(hdr->frame_control) &&
- !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
- if (tx->flags & IEEE80211_TX_UNICAST)
- info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
- return TX_CONTINUE;
- }
-
if (tx->flags & IEEE80211_TX_UNICAST)
return ieee80211_tx_h_unicast_ps_buf(tx);
else
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 6886601afe1c..a6cda52ed920 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1096,11 +1096,12 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
int err;
/* 24 + 6 = header + auth_algo + auth_transaction + status_code */
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len);
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
+ 24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN);
if (!skb)
return;
- skb_reserve(skb, local->hw.extra_tx_headroom);
+ skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
memset(mgmt, 0, 24 + 6);
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index a8eb0a89326a..610e19c0e13f 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -797,7 +797,6 @@ static void ip_vs_conn_expire(unsigned long data)
ip_vs_control_del(cp);
if (cp->flags & IP_VS_CONN_F_NFCT) {
- ip_vs_conn_drop_conntrack(cp);
/* Do not access conntracks during subsys cleanup
* because nf_conntrack_find_get can not be used after
* conntrack cleanup for the net.
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ab4566cfcbe4..8746ff9a8357 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -35,7 +35,7 @@ int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
{
INIT_LIST_HEAD(&afi->tables);
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_add_tail(&afi->list, &net->nft.af_info);
+ list_add_tail_rcu(&afi->list, &net->nft.af_info);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
return 0;
}
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(nft_register_afinfo);
void nft_unregister_afinfo(struct nft_af_info *afi)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_del(&afi->list);
+ list_del_rcu(&afi->list);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
@@ -277,11 +277,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
- list_for_each_entry(afi, &net->nft.af_info, list) {
+ rcu_read_lock();
+ cb->seq = net->nft.base_seq;
+
+ list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
if (family != NFPROTO_UNSPEC && family != afi->family)
continue;
- list_for_each_entry(table, &afi->tables, list) {
+ list_for_each_entry_rcu(table, &afi->tables, list) {
if (idx < s_idx)
goto cont;
if (idx > s_idx)
@@ -294,11 +297,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
NLM_F_MULTI,
afi->family, table) < 0)
goto done;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
done:
+ rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
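This conversion is the stock pattern for lockless netlink dumps: hold rcu_read_lock() across the walk, snapshot the table generation into cb->seq, iterate with the _rcu list primitives, and stamp every emitted message with nl_dump_check_consistent() so the dump gets flagged NLM_F_DUMP_INTR when a writer bumps the generation mid-walk. A condensed sketch of the shape, with fill_one() standing in for the nf_tables_fill_*_info() helpers:

rcu_read_lock();
cb->seq = net->nft.base_seq;                    /* generation snapshot */
list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
        list_for_each_entry_rcu(table, &afi->tables, list) {
                if (fill_one(skb, cb, table) < 0)  /* fill_one(): hypothetical */
                        goto done;
                /* mark the dump inconsistent if base_seq has moved */
                nl_dump_check_consistent(cb, nlmsg_hdr(skb));
        }
}
done:
rcu_read_unlock();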
@@ -407,6 +413,9 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
if (flags & ~NFT_TABLE_F_DORMANT)
return -EINVAL;
+ if (flags == ctx->table->flags)
+ return 0;
+
trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
sizeof(struct nft_trans_table));
if (trans == NULL)
@@ -514,7 +523,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
module_put(afi->owner);
return err;
}
- list_add_tail(&table->list, &afi->tables);
+ list_add_tail_rcu(&table->list, &afi->tables);
return 0;
}
@@ -546,7 +555,7 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
if (err < 0)
return err;
- list_del(&table->list);
+ list_del_rcu(&table->list);
return 0;
}
@@ -635,13 +644,20 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
{
struct nft_stats *cpu_stats, total;
struct nlattr *nest;
+ unsigned int seq;
+ u64 pkts, bytes;
int cpu;
memset(&total, 0, sizeof(total));
for_each_possible_cpu(cpu) {
cpu_stats = per_cpu_ptr(stats, cpu);
- total.pkts += cpu_stats->pkts;
- total.bytes += cpu_stats->bytes;
+ do {
+ seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ pkts = cpu_stats->pkts;
+ bytes = cpu_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+ total.pkts += pkts;
+ total.bytes += bytes;
}
nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
if (nest == NULL)
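The retry loop only works because each per-cpu slot carries its own u64_stats_sync seqcount: the reader snapshots pkts and bytes together and loops if a writer ran in between, so the 64-bit counters are never read torn on 32-bit hosts (on 64-bit the sync compiles away to nothing). Assumed shape of the counter slot, matching how it is used here:

struct nft_stats {
        u64                     bytes;
        u64                     pkts;
        struct u64_stats_sync   syncp;  /* what fetch_begin/retry spin on */
};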
@@ -761,12 +777,15 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
- list_for_each_entry(afi, &net->nft.af_info, list) {
+ rcu_read_lock();
+ cb->seq = net->nft.base_seq;
+
+ list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
if (family != NFPROTO_UNSPEC && family != afi->family)
continue;
- list_for_each_entry(table, &afi->tables, list) {
- list_for_each_entry(chain, &table->chains, list) {
+ list_for_each_entry_rcu(table, &afi->tables, list) {
+ list_for_each_entry_rcu(chain, &table->chains, list) {
if (idx < s_idx)
goto cont;
if (idx > s_idx)
@@ -778,17 +797,19 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
NLM_F_MULTI,
afi->family, table, chain) < 0)
goto done;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
}
done:
+ rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
-
static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
@@ -861,7 +882,7 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
return ERR_PTR(-EINVAL);
- newstats = alloc_percpu(struct nft_stats);
+ newstats = netdev_alloc_pcpu_stats(struct nft_stats);
if (newstats == NULL)
return ERR_PTR(-ENOMEM);
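Switching to netdev_alloc_pcpu_stats() matters because the macro does more than allocate: it also runs u64_stats_init() on every CPU's syncp, which a bare alloc_percpu() leaves uninitialized -- and on 32-bit builds that syncp is a real seqcount. Roughly what the macro expands to for this type:

struct nft_stats __percpu *newstats = alloc_percpu(struct nft_stats);
int cpu;

if (newstats)
        for_each_possible_cpu(cpu)
                u64_stats_init(&per_cpu_ptr(newstats, cpu)->syncp);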
@@ -1077,7 +1098,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
}
basechain->stats = stats;
} else {
- stats = alloc_percpu(struct nft_stats);
+ stats = netdev_alloc_pcpu_stats(struct nft_stats);
if (IS_ERR(stats)) {
module_put(type->owner);
kfree(basechain);
@@ -1130,7 +1151,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
goto err2;
table->use++;
- list_add_tail(&chain->list, &table->chains);
+ list_add_tail_rcu(&chain->list, &table->chains);
return 0;
err2:
if (!(table->flags & NFT_TABLE_F_DORMANT) &&
@@ -1180,7 +1201,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
return err;
table->use--;
- list_del(&chain->list);
+ list_del_rcu(&chain->list);
return 0;
}
@@ -1199,9 +1220,9 @@ int nft_register_expr(struct nft_expr_type *type)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
if (type->family == NFPROTO_UNSPEC)
- list_add_tail(&type->list, &nf_tables_expressions);
+ list_add_tail_rcu(&type->list, &nf_tables_expressions);
else
- list_add(&type->list, &nf_tables_expressions);
+ list_add_rcu(&type->list, &nf_tables_expressions);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
return 0;
}
@@ -1216,7 +1237,7 @@ EXPORT_SYMBOL_GPL(nft_register_expr);
void nft_unregister_expr(struct nft_expr_type *type)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_del(&type->list);
+ list_del_rcu(&type->list);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_expr);
@@ -1549,16 +1570,17 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
unsigned int idx = 0, s_idx = cb->args[0];
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
- u8 genctr = ACCESS_ONCE(net->nft.genctr);
- u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
- list_for_each_entry(afi, &net->nft.af_info, list) {
+ rcu_read_lock();
+ cb->seq = net->nft.base_seq;
+
+ list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
if (family != NFPROTO_UNSPEC && family != afi->family)
continue;
- list_for_each_entry(table, &afi->tables, list) {
- list_for_each_entry(chain, &table->chains, list) {
- list_for_each_entry(rule, &chain->rules, list) {
+ list_for_each_entry_rcu(table, &afi->tables, list) {
+ list_for_each_entry_rcu(chain, &table->chains, list) {
+ list_for_each_entry_rcu(rule, &chain->rules, list) {
if (!nft_rule_is_active(net, rule))
goto cont;
if (idx < s_idx)
@@ -1572,6 +1594,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
NLM_F_MULTI | NLM_F_APPEND,
afi->family, table, chain, rule) < 0)
goto done;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
@@ -1579,9 +1603,7 @@ cont:
}
}
done:
- /* Invalidate this dump, a transition to the new generation happened */
- if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
- return -EBUSY;
+ rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
@@ -1932,7 +1954,7 @@ static LIST_HEAD(nf_tables_set_ops);
int nft_register_set(struct nft_set_ops *ops)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_add_tail(&ops->list, &nf_tables_set_ops);
+ list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
return 0;
}
@@ -1941,7 +1963,7 @@ EXPORT_SYMBOL_GPL(nft_register_set);
void nft_unregister_set(struct nft_set_ops *ops)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_del(&ops->list);
+ list_del_rcu(&ops->list);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_set);
@@ -2234,7 +2256,10 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
if (cb->args[1])
return skb->len;
- list_for_each_entry(set, &ctx->table->sets, list) {
+ rcu_read_lock();
+ cb->seq = ctx->net->nft.base_seq;
+
+ list_for_each_entry_rcu(set, &ctx->table->sets, list) {
if (idx < s_idx)
goto cont;
if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2242,11 +2267,13 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
cb->args[0] = idx;
goto done;
}
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
cb->args[1] = 1;
done:
+ rcu_read_unlock();
return skb->len;
}
@@ -2260,7 +2287,10 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
if (cb->args[1])
return skb->len;
- list_for_each_entry(table, &ctx->afi->tables, list) {
+ rcu_read_lock();
+ cb->seq = ctx->net->nft.base_seq;
+
+ list_for_each_entry_rcu(table, &ctx->afi->tables, list) {
if (cur_table) {
if (cur_table != table)
continue;
@@ -2269,7 +2299,7 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
}
ctx->table = table;
idx = 0;
- list_for_each_entry(set, &ctx->table->sets, list) {
+ list_for_each_entry_rcu(set, &ctx->table->sets, list) {
if (idx < s_idx)
goto cont;
if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2278,12 +2308,14 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
cb->args[2] = (unsigned long) table;
goto done;
}
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
cb->args[1] = 1;
done:
+ rcu_read_unlock();
return skb->len;
}
@@ -2300,7 +2332,10 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
if (cb->args[1])
return skb->len;
- list_for_each_entry(afi, &net->nft.af_info, list) {
+ rcu_read_lock();
+ cb->seq = net->nft.base_seq;
+
+ list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
if (cur_family) {
if (afi->family != cur_family)
continue;
@@ -2308,7 +2343,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
cur_family = 0;
}
- list_for_each_entry(table, &afi->tables, list) {
+ list_for_each_entry_rcu(table, &afi->tables, list) {
if (cur_table) {
if (cur_table != table)
continue;
@@ -2319,7 +2354,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
ctx->table = table;
ctx->afi = afi;
idx = 0;
- list_for_each_entry(set, &ctx->table->sets, list) {
+ list_for_each_entry_rcu(set, &ctx->table->sets, list) {
if (idx < s_idx)
goto cont;
if (nf_tables_fill_set(skb, ctx, set,
@@ -2330,6 +2365,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
cb->args[3] = afi->family;
goto done;
}
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
@@ -2339,6 +2375,7 @@ cont:
}
cb->args[1] = 1;
done:
+ rcu_read_unlock();
return skb->len;
}
@@ -2597,7 +2634,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
if (err < 0)
goto err2;
- list_add_tail(&set->list, &table->sets);
+ list_add_tail_rcu(&set->list, &table->sets);
table->use++;
return 0;
@@ -2617,7 +2654,7 @@ static void nft_set_destroy(struct nft_set *set)
static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
{
- list_del(&set->list);
+ list_del_rcu(&set->list);
nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
nft_set_destroy(set);
}
@@ -2652,7 +2689,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
if (err < 0)
return err;
- list_del(&set->list);
+ list_del_rcu(&set->list);
ctx.table->use--;
return 0;
}
@@ -2704,14 +2741,14 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
}
bind:
binding->chain = ctx->chain;
- list_add_tail(&binding->list, &set->bindings);
+ list_add_tail_rcu(&binding->list, &set->bindings);
return 0;
}
void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding)
{
- list_del(&binding->list);
+ list_del_rcu(&binding->list);
if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
!(set->flags & NFT_SET_INACTIVE))
@@ -3346,7 +3383,7 @@ static int nf_tables_commit(struct sk_buff *skb)
struct nft_set *set;
/* Bump generation counter, invalidate any dump in progress */
- net->nft.genctr++;
+ while (++net->nft.base_seq == 0);
/* A new generation has just started */
net->nft.gencursor = gencursor_next(net);
@@ -3491,12 +3528,12 @@ static int nf_tables_abort(struct sk_buff *skb)
}
nft_trans_destroy(trans);
} else {
- list_del(&trans->ctx.table->list);
+ list_del_rcu(&trans->ctx.table->list);
}
break;
case NFT_MSG_DELTABLE:
- list_add_tail(&trans->ctx.table->list,
- &trans->ctx.afi->tables);
+ list_add_tail_rcu(&trans->ctx.table->list,
+ &trans->ctx.afi->tables);
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWCHAIN:
@@ -3507,7 +3544,7 @@ static int nf_tables_abort(struct sk_buff *skb)
nft_trans_destroy(trans);
} else {
trans->ctx.table->use--;
- list_del(&trans->ctx.chain->list);
+ list_del_rcu(&trans->ctx.chain->list);
if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
trans->ctx.chain->flags & NFT_BASE_CHAIN) {
nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
@@ -3517,8 +3554,8 @@ static int nf_tables_abort(struct sk_buff *skb)
break;
case NFT_MSG_DELCHAIN:
trans->ctx.table->use++;
- list_add_tail(&trans->ctx.chain->list,
- &trans->ctx.table->chains);
+ list_add_tail_rcu(&trans->ctx.chain->list,
+ &trans->ctx.table->chains);
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWRULE:
@@ -3532,12 +3569,12 @@ static int nf_tables_abort(struct sk_buff *skb)
break;
case NFT_MSG_NEWSET:
trans->ctx.table->use--;
- list_del(&nft_trans_set(trans)->list);
+ list_del_rcu(&nft_trans_set(trans)->list);
break;
case NFT_MSG_DELSET:
trans->ctx.table->use++;
- list_add_tail(&nft_trans_set(trans)->list,
- &trans->ctx.table->sets);
+ list_add_tail_rcu(&nft_trans_set(trans)->list,
+ &trans->ctx.table->sets);
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSETELEM:
@@ -3951,6 +3988,7 @@ static int nf_tables_init_net(struct net *net)
{
INIT_LIST_HEAD(&net->nft.af_info);
INIT_LIST_HEAD(&net->nft.commit_list);
+ net->nft.base_seq = 1;
return 0;
}
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 345acfb1720b..3b90eb2b2c55 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -109,7 +109,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
struct nft_data data[NFT_REG_MAX + 1];
unsigned int stackptr = 0;
struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
- struct nft_stats __percpu *stats;
+ struct nft_stats *stats;
int rulenum;
/*
* Cache cursor to avoid problems in case that the cursor is updated
@@ -205,9 +205,11 @@ next_rule:
nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
rcu_read_lock_bh();
- stats = rcu_dereference(nft_base_chain(basechain)->stats);
- __this_cpu_inc(stats->pkts);
- __this_cpu_add(stats->bytes, pkt->skb->len);
+ stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
+ u64_stats_update_begin(&stats->syncp);
+ stats->pkts++;
+ stats->bytes += pkt->skb->len;
+ u64_stats_update_end(&stats->syncp);
rcu_read_unlock_bh();
return nft_base_chain(basechain)->policy;
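This is the writer half of the u64_stats protocol seen in the dump path above: enter and leave the per-cpu seqcount around the two increments so a concurrent reader retries instead of observing a half-updated pkts/bytes pair. rcu_dereference() resolves the RCU-managed stats pointer and this_cpu_ptr() picks this CPU's slot. As a hedged helper, the same accounting could read:

/* hypothetical helper -- one packet accounted on the local CPU's slot */
static inline void nft_chain_account(struct nft_stats __percpu *pstats,
                                     unsigned int len)
{
        struct nft_stats *stats = this_cpu_ptr(pstats);

        u64_stats_update_begin(&stats->syncp);
        stats->pkts++;
        stats->bytes += len;
        u64_stats_update_end(&stats->syncp);
}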
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 15c731f03fa6..e6fac7e3db52 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -636,7 +636,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
while (nlk->cb_running && netlink_dump_space(nlk)) {
err = netlink_dump(sk);
if (err < 0) {
- sk->sk_err = err;
+ sk->sk_err = -err;
sk->sk_error_report(sk);
break;
}
@@ -2483,7 +2483,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
ret = netlink_dump(sk);
if (ret) {
- sk->sk_err = ret;
+ sk->sk_err = -ret;
sk->sk_error_report(sk);
}
}
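Both hunks fix the same sign bug: netlink_dump() returns 0 or a negative errno, while sk->sk_err follows the socket convention of storing a positive errno (sock_error() later returns -sk_err). Storing the raw negative value made userspace see nonsense error codes. The boundary now reads:

err = netlink_dump(sk);          /* 0 on success, -errno on failure */
if (err < 0) {
        sk->sk_err = -err;       /* sk_err holds a *positive* errno */
        sk->sk_error_report(sk);
}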
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index c36856a457ca..e70d8b18e962 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -551,6 +551,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
case OVS_ACTION_ATTR_SAMPLE:
err = sample(dp, skb, a);
+ if (unlikely(err)) /* skb already freed. */
+ return err;
break;
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 0d407bca81e3..9db4bf6740d1 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -276,7 +276,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
OVS_CB(skb)->flow = flow;
OVS_CB(skb)->pkt_key = &key;
- ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
+ ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
ovs_execute_actions(dp, skb);
stats_counter = &stats->n_hit;
@@ -889,8 +889,11 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
}
/* The unmasked key has to be the same for flow updates. */
if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
- error = -EEXIST;
- goto err_unlock_ovs;
+ flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+ if (!flow) {
+ error = -ENOENT;
+ goto err_unlock_ovs;
+ }
}
/* Update actions. */
old_acts = ovsl_dereference(flow->sf_acts);
@@ -981,16 +984,12 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_ovs;
}
/* Check that the flow exists. */
- flow = ovs_flow_tbl_lookup(&dp->table, &key);
+ flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (unlikely(!flow)) {
error = -ENOENT;
goto err_unlock_ovs;
}
- /* The unmasked key has to be the same for flow updates. */
- if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
- error = -EEXIST;
- goto err_unlock_ovs;
- }
+
/* Update actions, if present. */
if (likely(acts)) {
old_acts = ovsl_dereference(flow->sf_acts);
@@ -1063,8 +1062,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
- flow = ovs_flow_tbl_lookup(&dp->table, &key);
- if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
+ flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+ if (!flow) {
err = -ENOENT;
goto unlock;
}
@@ -1113,8 +1112,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
- flow = ovs_flow_tbl_lookup(&dp->table, &key);
- if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
+ flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+ if (unlikely(!flow)) {
err = -ENOENT;
goto unlock;
}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 334751cb1528..d07ab538fc9d 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -61,10 +61,10 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
-void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
+ struct sk_buff *skb)
{
struct flow_stats *stats;
- __be16 tcp_flags = flow->key.tp.flags;
int node = numa_node_id();
stats = rcu_dereference(flow->stats[node]);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index ac395d2cd821..5e5aaed3a85b 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -180,7 +180,8 @@ struct arp_eth_header {
unsigned char ar_tip[4]; /* target IP address */
} __packed;
-void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *);
+void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
+ struct sk_buff *);
void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
unsigned long *used, __be16 *tcp_flags);
void ovs_flow_stats_clear(struct sw_flow *);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 574c3abc9b30..cf2d853646f0 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -456,6 +456,22 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+ struct sw_flow_match *match)
+{
+ struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+ struct sw_flow_mask *mask;
+ struct sw_flow *flow;
+
+ /* Always called under ovs-mutex. */
+ list_for_each_entry(mask, &tbl->mask_list, list) {
+ flow = masked_flow_lookup(ti, match->key, mask);
+ if (flow && ovs_flow_cmp_unmasked_key(flow, match)) /* Found */
+ return flow;
+ }
+ return NULL;
+}
+
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
struct sw_flow_mask *mask;
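The new helper scans every registered mask, performs the masked hash lookup, and only accepts a hit whose unmasked key matches exactly, so the flow set/get/del paths above now resolve the precise flow the caller named instead of failing with EEXIST on a masked collision. A hedged usage sketch, with ovs_lock()/ovs_unlock() as used elsewhere in datapath.c:

ovs_lock();                     /* helper is only valid under ovs_mutex */
flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (unlikely(!flow)) {
        error = -ENOENT;
        goto err_unlock_ovs;
}
/* ... act on exactly the flow whose unmasked key was given ... */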
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index ca8a5820f615..5918bff7f3f6 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -76,7 +76,8 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
u32 *n_mask_hit);
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
const struct sw_flow_key *);
-
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+ struct sw_flow_match *match);
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
struct sw_flow_match *match);
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 35ec4fed09e2..f49148a07da2 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -110,6 +110,22 @@ static int gre_rcv(struct sk_buff *skb,
return PACKET_RCVD;
}
+/* Called with rcu_read_lock and BH disabled. */
+static int gre_err(struct sk_buff *skb, u32 info,
+ const struct tnl_ptk_info *tpi)
+{
+ struct ovs_net *ovs_net;
+ struct vport *vport;
+
+ ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
+ vport = rcu_dereference(ovs_net->vport_net.gre_vport);
+
+ if (unlikely(!vport))
+ return PACKET_REJECT;
+ else
+ return PACKET_RCVD;
+}
+
static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
{
struct net *net = ovs_dp_get_net(vport->dp);
@@ -186,6 +202,7 @@ error:
static struct gre_cisco_protocol gre_protocol = {
.handler = gre_rcv,
+ .err_handler = gre_err,
.priority = 1,
};
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index c39b583ace32..70c0be8d0121 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -38,6 +38,7 @@
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
+#include <linux/bitmap.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
@@ -460,17 +461,25 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
return 0;
}
+#define NR_U32_NODE (1<<12)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
struct tc_u_knode *n;
- unsigned int i = 0x7FF;
+ unsigned long i;
+ unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!bitmap)
+ return handle | 0xFFF;
for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
- if (i < TC_U32_NODE(n->handle))
- i = TC_U32_NODE(n->handle);
- i++;
+ set_bit(TC_U32_NODE(n->handle), bitmap);
- return handle | (i > 0xFFF ? 0xFFF : i);
+ i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
+ if (i >= NR_U32_NODE)
+ i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
+
+ kfree(bitmap);
+ return handle | (i >= NR_U32_NODE ? 0xFFF : i);
}
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
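gen_new_kid() hands out the node-id half of a u32 handle: the old code took max-seen-plus-one, which saturates at 0xFFF and then collides forever, while the bitmap scan reuses any freed id -- preferring [0x800, 0xFFF] and falling back to [1, 0x7FF] -- and only degrades to the old 0xFFF behaviour when the bitmap allocation fails. For reference, the handle layout these ids slot into (per the pkt_cls UAPI header):

/* A u32 handle packs hash-table id, bucket and node id; the node id
 * allocated above is the low 12 bits.
 */
#define TC_U32_NODE(h) ((h) & 0xFFF)
#define TC_U32_HASH(h) (((h) >> 12) & 0xFF)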
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 9de23a222d3f..06a9ee6b2d3a 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1097,6 +1097,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
asoc->c = new->c;
asoc->peer.rwnd = new->peer.rwnd;
asoc->peer.sack_needed = new->peer.sack_needed;
+ asoc->peer.auth_capable = new->peer.auth_capable;
asoc->peer.i = new->peer.i;
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
asoc->peer.i.initial_tsn, GFP_ATOMIC);
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c64658bd0b..b6842fdb53d4 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -366,9 +366,10 @@ fail:
* specification [SCTP] and any extensions for a list of possible
* error formats.
*/
-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
- const struct sctp_association *asoc, struct sctp_chunk *chunk,
- __u16 flags, gfp_t gfp)
+struct sctp_ulpevent *
+sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, __u16 flags,
+ gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_remote_error *sre;
@@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
/* Copy the skb to a new skb with room for us to prepend
* notification with.
*/
- skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
- 0, gfp);
+ skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
/* Pull off the rest of the cause TLV from the chunk. */
skb_pull(chunk->skb, elen);
@@ -399,62 +399,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
event = sctp_skb2event(skb);
sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
- sre = (struct sctp_remote_error *)
- skb_push(skb, sizeof(struct sctp_remote_error));
+ sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
/* Trim the buffer to the right length. */
- skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
+ skb_trim(skb, sizeof(*sre) + elen);
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_type:
- * It should be SCTP_REMOTE_ERROR.
- */
+ /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
+ memset(sre, 0, sizeof(*sre));
sre->sre_type = SCTP_REMOTE_ERROR;
-
- /*
- * Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_flags: 16 bits (unsigned integer)
- * Currently unused.
- */
sre->sre_flags = 0;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_length: sizeof (__u32)
- *
- * This field is the total length of the notification data,
- * including the notification header.
- */
sre->sre_length = skb->len;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_error: 16 bits (unsigned integer)
- * This value represents one of the Operational Error causes defined in
- * the SCTP specification, in network byte order.
- */
sre->sre_error = cause;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_assoc_id: sizeof (sctp_assoc_t)
- *
- * The association id field, holds the identifier for the association.
- * All notifications for a given association have the same association
- * identifier. For TCP style socket, this field is ignored.
- */
sctp_ulpevent_set_owner(event, asoc);
sre->sre_assoc_id = sctp_assoc2id(asoc);
return event;
-
fail:
return NULL;
}
@@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
return notification->sn_header.sn_type;
}
-/* Copy out the sndrcvinfo into a msghdr. */
+/* RFC6458, Section 5.3.2. SCTP Header Information Structure
+ * (SCTP_SNDRCV, DEPRECATED)
+ */
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *msghdr)
{
@@ -908,74 +869,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
if (sctp_ulpevent_is_notification(event))
return;
- /* Sockets API Extensions for SCTP
- * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
- *
- * sinfo_stream: 16 bits (unsigned integer)
- *
- * For recvmsg() the SCTP stack places the message's stream number in
- * this value.
- */
+ memset(&sinfo, 0, sizeof(sinfo));
sinfo.sinfo_stream = event->stream;
- /* sinfo_ssn: 16 bits (unsigned integer)
- *
- * For recvmsg() this value contains the stream sequence number that
- * the remote endpoint placed in the DATA chunk. For fragmented
- * messages this is the same number for all deliveries of the message
- * (if more than one recvmsg() is needed to read the message).
- */
sinfo.sinfo_ssn = event->ssn;
- /* sinfo_ppid: 32 bits (unsigned integer)
- *
- * In recvmsg() this value is
- * the same information that was passed by the upper layer in the peer
- * application. Please note that byte order issues are NOT accounted
- * for and this information is passed opaquely by the SCTP stack from
- * one end to the other.
- */
sinfo.sinfo_ppid = event->ppid;
- /* sinfo_flags: 16 bits (unsigned integer)
- *
- * This field may contain any of the following flags and is composed of
- * a bitwise OR of these values.
- *
- * recvmsg() flags:
- *
- * SCTP_UNORDERED - This flag is present when the message was sent
- * non-ordered.
- */
sinfo.sinfo_flags = event->flags;
- /* sinfo_tsn: 32 bit (unsigned integer)
- *
- * For the receiving side, this field holds a TSN that was
- * assigned to one of the SCTP Data Chunks.
- */
sinfo.sinfo_tsn = event->tsn;
- /* sinfo_cumtsn: 32 bit (unsigned integer)
- *
- * This field will hold the current cumulative TSN as
- * known by the underlying SCTP layer. Note this field is
- * ignored when sending and only valid for a receive
- * operation when sinfo_flags are set to SCTP_UNORDERED.
- */
sinfo.sinfo_cumtsn = event->cumtsn;
- /* sinfo_assoc_id: sizeof (sctp_assoc_t)
- *
- * The association handle field, sinfo_assoc_id, holds the identifier
- * for the association announced in the COMMUNICATION_UP notification.
- * All notifications for a given association have the same identifier.
- * Ignored for one-to-one style sockets.
- */
sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
-
- /* context value that is set via SCTP_CONTEXT socket option. */
+ /* Context value that is set via SCTP_CONTEXT socket option. */
sinfo.sinfo_context = event->asoc->default_rcv_context;
-
/* These fields are not used while receiving. */
sinfo.sinfo_timetolive = 0;
put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
- sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
+ sizeof(sinfo), &sinfo);
}
/* Do accounting for bytes received and hold a reference to the association
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 26631679a1fa..55c6c9d3e1ce 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -559,6 +559,7 @@ receive:
buf = node->bclink.deferred_head;
node->bclink.deferred_head = buf->next;
+ buf->next = NULL;
node->bclink.deferred_size--;
goto receive;
}
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 8be6e94a1ca9..0a37a472c29f 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -101,9 +101,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
}
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
- * Let first buffer become head buffer
- * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0
- * Leaves headbuf pointer at NULL if failure
+ * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
+ * out: set when successful non-complete reassembly, otherwise NULL
+ * @*buf: in: the buffer to append. Always defined
+ * out: head buf after successful complete reassembly, otherwise NULL
+ * Returns 1 when reassembly complete, otherwise 0
*/
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
@@ -122,6 +124,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
goto out_free;
head = *headbuf = frag;
skb_frag_list_init(head);
+ *buf = NULL;
return 0;
}
if (!head)
@@ -150,5 +153,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
out_free:
pr_warn_ratelimited("Unable to build fragment list\n");
kfree_skb(*buf);
+ kfree_skb(*headbuf);
+ *buf = *headbuf = NULL;
return 0;
}
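The updated contract leaves (*headbuf, *buf) defined on every path: on completion *buf is the head of the full message and *headbuf is NULL, on a good partial append both are consistent, and on failure the whole chain is now freed and both pointers cleared instead of leaking the partial list. A hedged caller sketch (next_fragment() and deliver() are hypothetical):

struct sk_buff *head = NULL, *frag;

while ((frag = next_fragment()) != NULL) {
        if (tipc_buf_append(&head, &frag)) {
                deliver(frag);  /* complete: frag is the head buffer */
                /* head was reset to NULL by tipc_buf_append() */
        }
        /* returns 0: incomplete append or error; head/frag already sane */
}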
diff --git a/net/wireless/core.h b/net/wireless/core.h
index e9afbf10e756..7e3a3cef7df9 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -424,7 +424,7 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
if (end >= start)
return jiffies_to_msecs(end - start);
- return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1);
+ return jiffies_to_msecs(end + (ULONG_MAX - start) + 1);
}
void
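The old code measured the wrap distance against MAX_JIFFY_OFFSET, but jiffies is a plain unsigned long whose counter wraps at ULONG_MAX, so elapsed spans that crossed a wrap came out wrong. With the fix the function is just modular subtraction spelled out; a self-contained equivalent:

#include <limits.h>

/* elapsed ticks of a free-running unsigned long counter; the wrap
 * branch equals the plain modular difference end - start
 */
static unsigned long elapsed_ticks(unsigned long start, unsigned long end)
{
        if (end >= start)
                return end - start;
        return end + (ULONG_MAX - start) + 1;
}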
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ba4f1723c83a..6668daf69326 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1497,18 +1497,17 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
}
CMD(start_p2p_device, START_P2P_DEVICE);
CMD(set_mcast_rate, SET_MCAST_RATE);
+#ifdef CONFIG_NL80211_TESTMODE
+ CMD(testmode_cmd, TESTMODE);
+#endif
if (state->split) {
CMD(crit_proto_start, CRIT_PROTOCOL_START);
CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
CMD(channel_switch, CHANNEL_SWITCH);
+ CMD(set_qos_map, SET_QOS_MAP);
}
- CMD(set_qos_map, SET_QOS_MAP);
-
-#ifdef CONFIG_NL80211_TESTMODE
- CMD(testmode_cmd, TESTMODE);
-#endif
-
+ /* add into the if now */
#undef CMD
if (rdev->ops->connect || rdev->ops->auth) {
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 558b0e3a02d8..1afdf45db38f 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -935,7 +935,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
if (!band_rule_found)
band_rule_found = freq_in_rule_band(fr, center_freq);
- bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5));
+ bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
if (band_rule_found && bw_fits)
return rr;
@@ -1019,10 +1019,10 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
}
#endif
-/* Find an ieee80211_reg_rule such that a 5MHz channel with frequency
- * chan->center_freq fits there.
- * If there is no such reg_rule, disable the channel, otherwise set the
- * flags corresponding to the bandwidths allowed in the particular reg_rule
+/*
+ * Note that right now we assume the desired channel bandwidth
+ * is always 20 MHz for each individual channel (HT40 uses 20 MHz
+ * per channel, the primary and the extension channel).
*/
static void handle_channel(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator,
@@ -1083,12 +1083,8 @@ static void handle_channel(struct wiphy *wiphy,
if (reg_rule->flags & NL80211_RRF_AUTO_BW)
max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
- if (max_bandwidth_khz < MHZ_TO_KHZ(10))
- bw_flags = IEEE80211_CHAN_NO_10MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(20))
- bw_flags |= IEEE80211_CHAN_NO_20MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(40))
- bw_flags |= IEEE80211_CHAN_NO_HT40;
+ bw_flags = IEEE80211_CHAN_NO_HT40;
if (max_bandwidth_khz < MHZ_TO_KHZ(80))
bw_flags |= IEEE80211_CHAN_NO_80MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1522,12 +1518,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
if (reg_rule->flags & NL80211_RRF_AUTO_BW)
max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
- if (max_bandwidth_khz < MHZ_TO_KHZ(10))
- bw_flags = IEEE80211_CHAN_NO_10MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(20))
- bw_flags |= IEEE80211_CHAN_NO_20MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(40))
- bw_flags |= IEEE80211_CHAN_NO_HT40;
+ bw_flags = IEEE80211_CHAN_NO_HT40;
if (max_bandwidth_khz < MHZ_TO_KHZ(80))
bw_flags |= IEEE80211_CHAN_NO_80MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(160))
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 560ed77084e9..7cc887f9da11 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2094,7 +2094,8 @@ TRACE_EVENT(cfg80211_michael_mic_failure,
MAC_ASSIGN(addr, addr);
__entry->key_type = key_type;
__entry->key_id = key_id;
- memcpy(__entry->tsc, tsc, 6);
+ if (tsc)
+ memcpy(__entry->tsc, tsc, 6);
),
TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index a8ef5108e0d8..0525d78ba328 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2097,6 +2097,8 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
goto no_transform;
}
+ dst_hold(&xdst->u.dst);
+ xdst->u.dst.flags |= DST_NOCACHE;
route = xdst->route;
}
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 412d9dc3a873..d4db6ebb089d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -177,9 +177,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_ALG_COMP] ||
- attrs[XFRMA_TFCPAD] ||
- (ntohl(p->id.spi) >= 0x10000))
-
+ attrs[XFRMA_TFCPAD])
goto out;
break;
@@ -207,7 +205,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
attrs[XFRMA_ALG_AUTH] ||
attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_CRYPT] ||
- attrs[XFRMA_TFCPAD])
+ attrs[XFRMA_TFCPAD] ||
+ (ntohl(p->id.spi) >= 0x10000))
goto out;
break;
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index da058da413e7..16a07cfa4d34 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -2073,6 +2073,7 @@ sub check_return_section {
sub dump_function($$) {
my $prototype = shift;
my $file = shift;
+ my $noret = 0;
$prototype =~ s/^static +//;
$prototype =~ s/^extern +//;
@@ -2086,7 +2087,7 @@ sub dump_function($$) {
$prototype =~ s/__init_or_module +//;
$prototype =~ s/__must_check +//;
$prototype =~ s/__weak +//;
- $prototype =~ s/^#\s*define\s+//; #ak added
+ my $define = $prototype =~ s/^#\s*define\s+//; #ak added
$prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//;
# Yes, this truly is vile. We are looking for:
@@ -2105,7 +2106,15 @@ sub dump_function($$) {
# - atomic_set (macro)
# - pci_match_device, __copy_to_user (long return type)
- if ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
+ if ($define && $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s+/) {
+ # This is an object-like macro, it has no return type and no parameter
+ # list.
+ # Function-like macros are not allowed to have spaces between
+ # declaration_name and opening parenthesis (notice the \s+).
+ $return_type = $1;
+ $declaration_name = $2;
+ $noret = 1;
+ } elsif ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
$prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
$prototype =~ m/^(\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
$prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
@@ -2140,7 +2149,7 @@ sub dump_function($$) {
# of warnings goes sufficiently down, the check is only performed in
# verbose mode.
# TODO: always perform the check.
- if ($verbose) {
+ if ($verbose && !$noret) {
check_return_section($file, $declaration_name, $return_type);
}
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 6af50eb80ea7..70faa3a32526 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -379,11 +379,11 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
struct special_params *params = bebob->maudio_special_quirk;
int err, id;
- mutex_lock(&bebob->mutex);
-
id = uval->value.enumerated.item[0];
if (id >= ARRAY_SIZE(special_clk_labels))
- return 0;
+ return -EINVAL;
+
+ mutex_lock(&bebob->mutex);
err = avc_maudio_set_special_clk(bebob, id,
params->dig_in_fmt,
@@ -391,7 +391,10 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
params->clk_lock);
mutex_unlock(&bebob->mutex);
- return err >= 0;
+ if (err >= 0)
+ err = 1;
+
+ return err;
}
static struct snd_kcontrol_new special_clk_ctl = {
.name = "Clock Source",
@@ -434,8 +437,8 @@ static struct snd_kcontrol_new special_sync_ctl = {
.get = special_sync_ctl_get,
};
-/* Digital interface control for special firmware */
-static char *const special_dig_iface_labels[] = {
+/* Digital input interface control for special firmware */
+static char *const special_dig_in_iface_labels[] = {
"S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
};
static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
@@ -443,13 +446,13 @@ static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
{
einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
einf->count = 1;
- einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels);
+ einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);
if (einf->value.enumerated.item >= einf->value.enumerated.items)
einf->value.enumerated.item = einf->value.enumerated.items - 1;
strcpy(einf->value.enumerated.name,
- special_dig_iface_labels[einf->value.enumerated.item]);
+ special_dig_in_iface_labels[einf->value.enumerated.item]);
return 0;
}
@@ -491,26 +494,36 @@ static int special_dig_in_iface_ctl_set(struct snd_kcontrol *kctl,
unsigned int id, dig_in_fmt, dig_in_iface;
int err;
- mutex_lock(&bebob->mutex);
-
id = uval->value.enumerated.item[0];
+ if (id >= ARRAY_SIZE(special_dig_in_iface_labels))
+ return -EINVAL;
/* decode user value */
dig_in_fmt = (id >> 1) & 0x01;
dig_in_iface = id & 0x01;
+ mutex_lock(&bebob->mutex);
+
err = avc_maudio_set_special_clk(bebob,
params->clk_src,
dig_in_fmt,
params->dig_out_fmt,
params->clk_lock);
- if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */
+ if (err < 0)
+ goto end;
+
+ /* For ADAT, optical interface is only available. */
+ if (params->dig_in_fmt > 0) {
+ err = 1;
goto end;
+ }
+ /* For S/PDIF, optical/coaxial interfaces are selectable. */
err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface);
if (err < 0)
dev_err(&bebob->unit->device,
"fail to set digital input interface: %d\n", err);
+ err = 1;
end:
special_stream_formation_set(bebob);
mutex_unlock(&bebob->mutex);
@@ -525,18 +538,22 @@ static struct snd_kcontrol_new special_dig_in_iface_ctl = {
.put = special_dig_in_iface_ctl_set
};
+/* Digital output interface control for special firmware */
+static char *const special_dig_out_iface_labels[] = {
+ "S/PDIF Optical and Coaxial", "ADAT Optical"
+};
static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
struct snd_ctl_elem_info *einf)
{
einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
einf->count = 1;
- einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1;
+ einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);
if (einf->value.enumerated.item >= einf->value.enumerated.items)
einf->value.enumerated.item = einf->value.enumerated.items - 1;
strcpy(einf->value.enumerated.name,
- special_dig_iface_labels[einf->value.enumerated.item + 1]);
+ special_dig_out_iface_labels[einf->value.enumerated.item]);
return 0;
}
@@ -558,16 +575,20 @@ static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl,
unsigned int id;
int err;
- mutex_lock(&bebob->mutex);
-
id = uval->value.enumerated.item[0];
+ if (id >= ARRAY_SIZE(special_dig_out_iface_labels))
+ return -EINVAL;
+
+ mutex_lock(&bebob->mutex);
err = avc_maudio_set_special_clk(bebob,
params->clk_src,
params->dig_in_fmt,
id, params->clk_lock);
- if (err >= 0)
+ if (err >= 0) {
special_stream_formation_set(bebob);
+ err = 1;
+ }
mutex_unlock(&bebob->mutex);
return err;
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 480bbddbd801..6df04d91c93c 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -193,7 +193,8 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
dsp_unlock(azx_dev);
return azx_dev;
}
- if (!res)
+ if (!res ||
+ (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
res = azx_dev;
}
dsp_unlock(azx_dev);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b6b4e71a0b0b..83cd19017cf3 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -227,7 +227,7 @@ enum {
/* quirks for Intel PCH */
#define AZX_DCAPS_INTEL_PCH_NOPM \
(AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
- AZX_DCAPS_COUNT_LPIB_DELAY)
+ AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_REVERSE_ASSIGN)
#define AZX_DCAPS_INTEL_PCH \
(AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
@@ -596,7 +596,7 @@ static int azx_suspend(struct device *dev)
struct azx *chip = card->private_data;
struct azx_pcm *p;
- if (chip->disabled)
+ if (chip->disabled || chip->init_failed)
return 0;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -628,7 +628,7 @@ static int azx_resume(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
- if (chip->disabled)
+ if (chip->disabled || chip->init_failed)
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
@@ -665,7 +665,7 @@ static int azx_runtime_suspend(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
- if (chip->disabled)
+ if (chip->disabled || chip->init_failed)
return 0;
if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -692,7 +692,7 @@ static int azx_runtime_resume(struct device *dev)
struct hda_codec *codec;
int status;
- if (chip->disabled)
+ if (chip->disabled || chip->init_failed)
return 0;
if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -729,7 +729,7 @@ static int azx_runtime_idle(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
- if (chip->disabled)
+ if (chip->disabled || chip->init_failed)
return 0;
if (!power_save_controller ||
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index 4a7cb01fa912..e9d1a5762a55 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -186,6 +186,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_DCAPS_BUFSIZE (1 << 21) /* no buffer size alignment */
#define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */
#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */
+#define AZX_DCAPS_REVERSE_ASSIGN (1 << 24) /* Assign devices in reverse order */
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index a366ba9293a8..358414da6418 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -236,6 +236,7 @@ disable_hda:
return rc;
}
+#ifdef CONFIG_PM_SLEEP
static void hda_tegra_disable_clocks(struct hda_tegra *data)
{
clk_disable_unprepare(data->hda2hdmi_clk);
@@ -243,7 +244,6 @@ static void hda_tegra_disable_clocks(struct hda_tegra *data)
clk_disable_unprepare(data->hda_clk);
}
-#ifdef CONFIG_PM_SLEEP
/*
* power management
*/
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 4fe876b65fda..ba4ca52072ff 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3337,6 +3337,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
+{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3394,6 +3395,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0044");
MODULE_ALIAS("snd-hda-codec-id:10de0051");
MODULE_ALIAS("snd-hda-codec-id:10de0060");
MODULE_ALIAS("snd-hda-codec-id:10de0067");
+MODULE_ALIAS("snd-hda-codec-id:10de0070");
MODULE_ALIAS("snd-hda-codec-id:10de0071");
MODULE_ALIAS("snd-hda-codec-id:10de8001");
MODULE_ALIAS("snd-hda-codec-id:11069f80");
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index 0849b7b83f0a..0db94f492e97 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -59,7 +59,6 @@ int imx_pcm_dma_init(struct platform_device *pdev)
{
return devm_snd_dmaengine_pcm_register(&pdev->dev,
&imx_dmaengine_pcm_config,
- SND_DMAENGINE_PCM_FLAG_NO_RESIDUE |
SND_DMAENGINE_PCM_FLAG_COMPAT);
}
EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
index c342f7087147..ee53a42818ca 100644
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ b/tools/lib/lockdep/include/liblockdep/mutex.h
@@ -35,7 +35,7 @@ static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
{
- lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
return pthread_mutex_lock(&lock->mutex);
}
@@ -47,7 +47,7 @@ static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lo
static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
{
- lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
}
diff --git a/tools/lib/lockdep/include/liblockdep/rwlock.h b/tools/lib/lockdep/include/liblockdep/rwlock.h
index a680ab8c2e36..4ec03f861551 100644
--- a/tools/lib/lockdep/include/liblockdep/rwlock.h
+++ b/tools/lib/lockdep/include/liblockdep/rwlock.h
@@ -36,7 +36,7 @@ static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
{
- lock_acquire(&lock->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
return pthread_rwlock_rdlock(&lock->rwlock);
}
@@ -49,19 +49,19 @@ static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *
static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
{
- lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
return pthread_rwlock_wrlock(&lock->rwlock);
}
static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
{
- lock_acquire(&lock->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
}
static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock)
{
- lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0;
}
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 23bd69cb5ade..6f803609e498 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -92,7 +92,7 @@ enum { none, prepare, done, } __init_state;
static void init_preload(void);
static void try_init_preload(void)
{
- if (!__init_state != done)
+ if (__init_state != done)
init_preload();
}
@@ -252,7 +252,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
try_init_preload();
- lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL,
+ lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
(unsigned long)_RET_IP_);
/*
* Here's the thing with pthread mutexes: unlike the kernel variant,
@@ -281,7 +281,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
try_init_preload();
- lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_mutex_trylock(mutex);
if (r)
lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -303,7 +303,7 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)
*/
r = ll_pthread_mutex_unlock(mutex);
if (r)
- lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
return r;
}
@@ -352,7 +352,7 @@ int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
init_preload();
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_rdlock(rwlock);
if (r)
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -366,7 +366,7 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
init_preload();
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_tryrdlock(rwlock);
if (r)
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -380,7 +380,7 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
init_preload();
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_trywrlock(rwlock);
if (r)
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -394,7 +394,7 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
init_preload();
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_wrlock(rwlock);
if (r)
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -411,7 +411,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_unlock(rwlock);
if (r)
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+ lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
return r;
}
@@ -439,8 +439,6 @@ __attribute__((constructor)) static void init_preload(void)
ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
#endif
- printf("%p\n", ll_pthread_mutex_trylock);fflush(stdout);
-
lockdep_init();
__init_state = done;
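Every call site here flips the fifth lock_acquire() argument -- check -- from 2 to 1. The kernel's lockdep collapsed its two validation levels into a boolean kept in a narrow bitfield, so the liblockdep headers and preload shims are synced to pass 1 ("validate") to stay in range. For reference, the argument order of the in-kernel lockdep API:

/* lock_acquire(map, subclass, trylock, read, check, nest_lock, ip) */
lock_acquire(&lock->dep_map, 0, /* trylock */ 0, /* read */ 0,
             /* check */ 1, NULL, (unsigned long)_RET_IP_);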
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 52c03fbbba17..04a229aa5c0f 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -17,6 +17,7 @@
#include "../util.h"
#include "../ui.h"
#include "map.h"
+#include "annotate.h"
struct hist_browser {
struct ui_browser b;
@@ -1593,13 +1594,18 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
bi->to.sym->name) > 0)
annotate_t = nr_options++;
} else {
-
if (browser->selection != NULL &&
browser->selection->sym != NULL &&
- !browser->selection->map->dso->annotate_warned &&
- asprintf(&options[nr_options], "Annotate %s",
- browser->selection->sym->name) > 0)
- annotate = nr_options++;
+ !browser->selection->map->dso->annotate_warned) {
+ struct annotation *notes;
+
+ notes = symbol__annotation(browser->selection->sym);
+
+ if (notes->src &&
+ asprintf(&options[nr_options], "Annotate %s",
+ browser->selection->sym->name) > 0)
+ annotate = nr_options++;
+ }
}
if (thread != NULL &&
@@ -1656,6 +1662,7 @@ retry_popup_menu:
if (choice == annotate || choice == annotate_t || choice == annotate_f) {
struct hist_entry *he;
+ struct annotation *notes;
int err;
do_annotate:
if (!objdump_path && perf_session_env__lookup_objdump(env))
@@ -1679,6 +1686,10 @@ do_annotate:
he->ms.map = he->branch_info->to.map;
}
+ notes = symbol__annotation(he->ms.sym);
+ if (!notes->src)
+ continue;
+
/*
* Don't let this be freed, say, by hists__decay_entry.
*/
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 0e5fea95d596..c73e1fc12e53 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,18 +496,6 @@ struct process_args {
u64 start;
};
-static int symbol__in_kernel(void *arg, const char *name,
- char type __maybe_unused, u64 start)
-{
- struct process_args *args = arg;
-
- if (strchr(name, '['))
- return 0;
-
- args->start = start;
- return 1;
-}
-
static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
size_t bufsz)
{
@@ -517,27 +505,41 @@ static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}
-/* Figure out the start address of kernel map from /proc/kallsyms */
-static u64 machine__get_kernel_start_addr(struct machine *machine)
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
+/* Figure out the start address of kernel map from /proc/kallsyms.
+ * Returns the name of the start symbol in *symbol_name. Pass in NULL as
+ * symbol_name if it's not that important.
+ */
+static u64 machine__get_kernel_start_addr(struct machine *machine,
+ const char **symbol_name)
{
char filename[PATH_MAX];
- struct process_args args;
+ int i;
+ const char *name;
+ u64 addr = 0;
machine__get_kallsyms_filename(machine, filename, PATH_MAX);
if (symbol__restricted_filename(filename, "/proc/kallsyms"))
return 0;
- if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
- return 0;
+ for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+ addr = kallsyms__get_function_start(filename, name);
+ if (addr)
+ break;
+ }
+
+ if (symbol_name)
+ *symbol_name = name;
- return args.start;
+ return addr;
}
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
enum map_type type;
- u64 start = machine__get_kernel_start_addr(machine);
+ u64 start = machine__get_kernel_start_addr(machine, NULL);
for (type = 0; type < MAP__NR_TYPES; ++type) {
struct kmap *kmap;
@@ -852,23 +854,11 @@ static int machine__create_modules(struct machine *machine)
return 0;
}
-const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
-
int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
- char filename[PATH_MAX];
const char *name;
- u64 addr = 0;
- int i;
-
- machine__get_kallsyms_filename(machine, filename, PATH_MAX);
-
- for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
- addr = kallsyms__get_function_start(filename, name);
- if (addr)
- break;
- }
+ u64 addr = machine__get_kernel_start_addr(machine, &name);
if (!addr)
return -1;
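
After this refactor, machine__get_kernel_start_addr() is the single place that walks ref_reloc_sym_names ("_text", then "_stext") and takes the first name kallsyms can resolve, optionally reporting which one matched. A self-contained sketch of that lookup against /proc/kallsyms (perf itself goes through kallsyms__get_function_start(); note that addresses read back as 0 when kptr_restrict hides them):

    #include <stdio.h>
    #include <string.h>
    #include <inttypes.h>

    static uint64_t kallsyms_lookup(const char *path, const char *want)
    {
            FILE *f = fopen(path, "r");
            char name[256], type;
            uint64_t addr;

            if (!f)
                    return 0;
            /* each line: "<hex addr> <type> <name> [module]" */
            while (fscanf(f, "%" SCNx64 " %c %255s%*[^\n]",
                          &addr, &type, name) == 3) {
                    if (!strcmp(name, want)) {
                            fclose(f);
                            return addr;
                    }
            }
            fclose(f);
            return 0;
    }

    int main(void)
    {
            static const char *names[] = { "_text", "_stext", NULL };
            uint64_t addr = 0;

            for (int i = 0; names[i]; i++)
                    if ((addr = kallsyms_lookup("/proc/kallsyms", names[i])))
                            break;
            printf("kernel start: %#" PRIx64 "\n", addr);
            return 0;
    }
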
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
index 447321104ec0..e775adcbd29f 100644
--- a/tools/thermal/tmon/Makefile
+++ b/tools/thermal/tmon/Makefile
@@ -21,7 +21,7 @@ OBJS = tmon.o tui.o sysfs.o pid.o
OBJS +=
tmon: $(OBJS) Makefile tmon.h
- $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS) -o $(TARGET) -lm -lpanel -lncursesw -lpthread
+ $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS) -o $(TARGET) -lm -lpanel -lncursesw -ltinfo -lpthread
valgrind: tmon
sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET) 1> /dev/null
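
The extra -ltinfo matters on distributions that build ncurses with a split terminfo library: the curses calls resolve from libncursesw, but the low-level terminfo entry points live in libtinfo, and linking only -lncursesw leaves them undefined. A tiny probe, assuming the ncurses development headers are installed (whether it links without -ltinfo depends on how your distro packaged ncurses):

    /* build sketch: cc probe.c -lncursesw -ltinfo */
    #include <stdio.h>
    #include <unistd.h>
    #include <curses.h>
    #include <term.h>

    int main(void)
    {
            int err;

            /* setupterm() is a terminfo-layer routine */
            if (setupterm(NULL, STDOUT_FILENO, &err) != 0) {
                    fprintf(stderr, "setupterm failed (err=%d)\n", err);
                    return 1;
            }
            printf("terminal initialized from terminfo\n");
            return 0;
    }
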
diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
index b30f531173e4..09b7c3218334 100644
--- a/tools/thermal/tmon/tmon.c
+++ b/tools/thermal/tmon/tmon.c
@@ -142,6 +142,7 @@ static void start_syslog(void)
static void prepare_logging(void)
{
int i;
+ struct stat logstat;
if (!logging)
return;
@@ -152,6 +153,28 @@ static void prepare_logging(void)
return;
}
+ if (lstat(TMON_LOG_FILE, &logstat) < 0) {
+ syslog(LOG_ERR, "Unable to stat log file %s\n", TMON_LOG_FILE);
+ fclose(tmon_log);
+ tmon_log = NULL;
+ return;
+ }
+
+ /* The log file must not be a symlink and must be owned by us */
+ if (S_ISLNK(logstat.st_mode)) {
+ syslog(LOG_ERR, "Log file is a symlink. Will not log\n");
+ fclose(tmon_log);
+ tmon_log = NULL;
+ return;
+ }
+
+ if (logstat.st_uid != getuid()) {
+ syslog(LOG_ERR, "We don't own the log file. Not logging\n");
+ fclose(tmon_log);
+ tmon_log = NULL;
+ return;
+ }
+
fprintf(tmon_log, "#----------- THERMAL SYSTEM CONFIG -------------\n");
for (i = 0; i < ptdata.nr_tz_sensor; i++) {
char binding_str[33]; /* size of long + 1 */
@@ -331,7 +355,7 @@ static void start_daemon_mode()
disable_tui();
/* change the file mode mask */
- umask(0);
+ umask(S_IWGRP | S_IWOTH);
/* new SID for the daemon process */
sid = setsid();
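
The prepare_logging() hunks refuse to write the log if lstat() fails, if the path is a symlink, or if the file is owned by someone else, closing the already-opened stream in each case. The same policy can be expressed race-free by opening with O_NOFOLLOW and checking the open descriptor with fstat(); a sketch of that variant (illustrative hardening, not tmon's exact sequence):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    FILE *open_log_securely(const char *path)
    {
            struct stat st;
            int fd = open(path, O_WRONLY | O_CREAT | O_APPEND | O_NOFOLLOW,
                          0644);

            if (fd < 0)
                    return NULL;    /* includes ELOOP when path is a symlink */

            /* check the descriptor we hold, not the path, to avoid a race */
            if (fstat(fd, &st) < 0 || !S_ISREG(st.st_mode) ||
                st.st_uid != getuid()) {
                    close(fd);
                    return NULL;
            }
            return fdopen(fd, "a");
    }
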
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 56ff9bebb577..476d3bf540a8 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1526,17 +1526,33 @@ int kvm_vgic_hyp_init(void)
goto out_unmap;
}
- kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
- vctrl_res.start, vgic_maint_irq);
- on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
kvm_err("Cannot obtain VCPU resource\n");
ret = -ENXIO;
goto out_unmap;
}
+
+ if (!PAGE_ALIGNED(vcpu_res.start)) {
+ kvm_err("GICV physical address 0x%llx not page aligned\n",
+ (unsigned long long)vcpu_res.start);
+ ret = -ENXIO;
+ goto out_unmap;
+ }
+
+ if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+ kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+ (unsigned long long)resource_size(&vcpu_res),
+ PAGE_SIZE);
+ ret = -ENXIO;
+ goto out_unmap;
+ }
+
vgic_vcpu_base = vcpu_res.start;
+ kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+ vctrl_res.start, vgic_maint_irq);
+ on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
goto out;
out_unmap:
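
The new vgic checks reject a GICV base address or size that is not page aligned, since the region is handed to guests at page granularity. In the kernel, PAGE_ALIGNED(x) reduces to a power-of-two mask test, IS_ALIGNED(x, PAGE_SIZE). A userspace sketch of that test (4096 assumed as the page size):

    #include <stdint.h>
    #include <stdio.h>

    static int is_aligned(uint64_t addr, uint64_t a)
    {
            /* valid only when a is a power of two */
            return (addr & (a - 1)) == 0;
    }

    int main(void)
    {
            const uint64_t page = 4096;

            printf("%d %d\n",
                   is_aligned(0x10003000, page),    /* prints 1 */
                   is_aligned(0x10003100, page));   /* prints 0 */
            return 0;
    }
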