709 files changed, 30864 insertions, 14640 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index cdd8b24db68d..cc303a2f641c 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl @@ -229,6 +229,7 @@ X!Isound/sound_firmware.c !Iinclude/media/v4l2-dv-timings.h !Iinclude/media/v4l2-event.h !Iinclude/media/v4l2-flash-led-class.h +!Iinclude/media/v4l2-mc.h !Iinclude/media/v4l2-mediabus.h !Iinclude/media/v4l2-mem2mem.h !Iinclude/media/v4l2-of.h diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl index a8669330b456..fe6b36a2fd98 100644 --- a/Documentation/DocBook/gpu.tmpl +++ b/Documentation/DocBook/gpu.tmpl @@ -2886,52 +2886,8 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis> </sect2> <sect2> <title>File Operations</title> - <synopsis>const struct file_operations *fops</synopsis> - <abstract>File operations for the DRM device node.</abstract> - <para> - Drivers must define the file operations structure that forms the DRM - userspace API entry point, even though most of those operations are - implemented in the DRM core. The <methodname>open</methodname>, - <methodname>release</methodname> and <methodname>ioctl</methodname> - operations are handled by - <programlisting> - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - #ifdef CONFIG_COMPAT - .compat_ioctl = drm_compat_ioctl, - #endif - </programlisting> - </para> - <para> - Drivers that implement private ioctls that requires 32/64bit - compatibility support must provide their own - <methodname>compat_ioctl</methodname> handler that processes private - ioctls and calls <function>drm_compat_ioctl</function> for core ioctls. - </para> - <para> - The <methodname>read</methodname> and <methodname>poll</methodname> - operations provide support for reading DRM events and polling them. They - are implemented by - <programlisting> - .poll = drm_poll, - .read = drm_read, - .llseek = no_llseek, - </programlisting> - </para> - <para> - The memory mapping implementation varies depending on how the driver - manages memory. Pre-GEM drivers will use <function>drm_mmap</function>, - while GEM-aware drivers will use <function>drm_gem_mmap</function>. See - <xref linkend="drm-gem"/>. - <programlisting> - .mmap = drm_gem_mmap, - </programlisting> - </para> - <para> - No other file operation is supported by the DRM API. 
- </para> +!Pdrivers/gpu/drm/drm_fops.c file operations +!Edrivers/gpu/drm/drm_fops.c </sect2> <sect2> <title>IOCTLs</title> @@ -3319,6 +3275,12 @@ int num_ioctls;</synopsis> !Pdrivers/gpu/drm/i915/intel_csr.c csr support for dmc !Idrivers/gpu/drm/i915/intel_csr.c </sect2> + <sect2> + <title>Video BIOS Table (VBT)</title> +!Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT) +!Idrivers/gpu/drm/i915/intel_bios.c +!Idrivers/gpu/drm/i915/intel_bios.h + </sect2> </sect1> <sect1> @@ -3460,6 +3422,7 @@ int num_ioctls;</synopsis> </sect1> <sect1> <title>Public constants</title> +!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler_flags_t !Finclude/linux/vga_switcheroo.h vga_switcheroo_client_id !Finclude/linux/vga_switcheroo.h vga_switcheroo_state </sect1> @@ -3488,6 +3451,10 @@ int num_ioctls;</synopsis> <title>Backlight control</title> !Pdrivers/platform/x86/apple-gmux.c Backlight control </sect2> + <sect2> + <title>Public functions</title> +!Iinclude/linux/apple-gmux.h + </sect2> </sect1> </chapter> diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml index f13a429093f1..361040e6b0f4 100644 --- a/Documentation/DocBook/media/v4l/controls.xml +++ b/Documentation/DocBook/media/v4l/controls.xml @@ -2330,6 +2330,14 @@ vertical search range for motion estimation module in video encoder.</entry> </row> <row><entry></entry></row> + <row id="v4l2-mpeg-video-force-key-frame"> + <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME</constant> </entry> + <entry>button</entry> + </row><row><entry spanname="descr">Force a key frame for the next queued buffer. Applicable to encoders. +This is a general, codec-agnostic keyframe control.</entry> + </row> + + <row><entry></entry></row> <row> <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE</constant> </entry> <entry>integer</entry> @@ -5070,6 +5078,46 @@ interface and may change in the future.</para> </entry> </row> <row> + <entry spanname="id"><constant>V4L2_CID_DV_TX_IT_CONTENT_TYPE</constant></entry> + <entry id="v4l2-dv-content-type">enum v4l2_dv_it_content_type</entry> + </row> + <row><entry spanname="descr">Configures the IT Content Type + of the transmitted video. This information is sent over HDMI and DisplayPort connectors + as part of the AVI InfoFrame. The term 'IT Content' is used for content that originates + from a computer as opposed to content from a TV broadcast or an analog source. The + enum v4l2_dv_it_content_type defines the possible content types:</entry> + </row> + <row> + <entrytbl spanname="descr" cols="2"> + <tbody valign="top"> + <row> + <entry><constant>V4L2_DV_IT_CONTENT_TYPE_GRAPHICS</constant> </entry> + <entry>Graphics content. Pixel data should be passed unfiltered and without + analog reconstruction.</entry> + </row> + <row> + <entry><constant>V4L2_DV_IT_CONTENT_TYPE_PHOTO</constant> </entry> + <entry>Photo content. The content is derived from digital still pictures. + The content should be passed through with minimal scaling and picture + enhancements.</entry> + </row> + <row> + <entry><constant>V4L2_DV_IT_CONTENT_TYPE_CINEMA</constant> </entry> + <entry>Cinema content.</entry> + </row> + <row> + <entry><constant>V4L2_DV_IT_CONTENT_TYPE_GAME</constant> </entry> + <entry>Game content. 
Audio and video latency should be minimized.</entry> + </row> + <row> + <entry><constant>V4L2_DV_IT_CONTENT_TYPE_NO_ITC</constant> </entry> + <entry>No IT Content information is available and the ITC bit in the AVI + InfoFrame is set to 0.</entry> + </row> + </tbody> + </entrytbl> + </row> + <row> <entry spanname="id"><constant>V4L2_CID_DV_RX_POWER_PRESENT</constant></entry> <entry>bitmask</entry> </row> @@ -5098,6 +5146,16 @@ interface and may change in the future.</para> This control is applicable to VGA, DVI-A/D, HDMI and DisplayPort connectors. </entry> </row> + <row> + <entry spanname="id"><constant>V4L2_CID_DV_RX_IT_CONTENT_TYPE</constant></entry> + <entry>enum v4l2_dv_it_content_type</entry> + </row> + <row><entry spanname="descr">Reads the IT Content Type + of the received video. This information is sent over HDMI and DisplayPort connectors + as part of the AVI InfoFrame. The term 'IT Content' is used for content that originates + from a computer as opposed to content from a TV broadcast or an analog source. See + <constant>V4L2_CID_DV_TX_IT_CONTENT_TYPE</constant> for the available content types.</entry> + </row> <row><entry></entry></row> </tbody> </tgroup> diff --git a/Documentation/DocBook/media/v4l/media-ioc-g-topology.xml b/Documentation/DocBook/media/v4l/media-ioc-g-topology.xml index 63152ab9efba..e0d49fa329f0 100644 --- a/Documentation/DocBook/media/v4l/media-ioc-g-topology.xml +++ b/Documentation/DocBook/media/v4l/media-ioc-g-topology.xml @@ -48,9 +48,6 @@ <refsect1> <title>Description</title> - - <para><emphasis role="bold">NOTE:</emphasis> This new ioctl is programmed to be added on Kernel 4.6. Its definition/arguments may change until its final version.</para> - <para>The typical usage of this ioctl is to call it twice. On the first call, the structure defined at &media-v2-topology; should be zeroed. At return, if no errors happen, this ioctl will return the diff --git a/Documentation/DocBook/media/v4l/media-types.xml b/Documentation/DocBook/media/v4l/media-types.xml index 1af384250910..8b4fa39cf611 100644 --- a/Documentation/DocBook/media/v4l/media-types.xml +++ b/Documentation/DocBook/media/v4l/media-types.xml @@ -57,10 +57,6 @@ <entry>Connector for a RGB composite signal.</entry> </row> <row> - <entry><constant>MEDIA_ENT_F_CONN_TEST</constant></entry> - <entry>Connector for a test generator.</entry> - </row> - <row> <entry><constant>MEDIA_ENT_F_CAM_SENSOR</constant></entry> <entry>Camera video sensor entity.</entry> </row> @@ -84,7 +80,34 @@ </row> <row> <entry><constant>MEDIA_ENT_F_TUNER</constant></entry> - <entry>Digital TV, analog TV, radio and/or software radio tuner.</entry> + <entry>Digital TV, analog TV, radio and/or software radio tuner, + with consists on a PLL tuning stage that converts radio + frequency (RF) signal into an Intermediate Frequency (IF). + Modern tuners have internally IF-PLL decoders for audio + and video, but older models have those stages implemented + on separate entities. + </entry> + </row> + <row> + <entry><constant>MEDIA_ENT_F_IF_VID_DECODER</constant></entry> + <entry>IF-PLL video decoder. It receives the IF from a PLL + and decodes the analog TV video signal. This is commonly + found on some very old analog tuners, like Philips MK3 + designs. They all contain a tda9887 (or some software + compatible similar chip, like tda9885). Those devices + use a different I2C address than the tuner PLL. + </entry> + </row> + <row> + <entry><constant>MEDIA_ENT_F_IF_AUD_DECODER</constant></entry> + <entry>IF-PLL sound decoder. 
It receives the IF from a PLL + and decodes the analog TV audio signal. This is commonly + found on some very old analog hardware, like Micronas + msp3400, Philips tda9840, tda985x, etc. Those devices + use a different I2C address than the tuner PLL and + should be controlled together with the IF-PLL video + decoder. + </entry> </row> </tbody> </tgroup> diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml index e781cc61786c..7d13fe96657d 100644 --- a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml +++ b/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml @@ -1,35 +1,43 @@ - <refentry id="V4L2-PIX-FMT-YUV420M"> + <refentry> <refmeta> - <refentrytitle>V4L2_PIX_FMT_YUV420M ('YM12')</refentrytitle> + <refentrytitle>V4L2_PIX_FMT_YUV420M ('YM12'), V4L2_PIX_FMT_YVU420M ('YM21')</refentrytitle> &manvol; </refmeta> <refnamediv> - <refname> <constant>V4L2_PIX_FMT_YUV420M</constant></refname> - <refpurpose>Variation of <constant>V4L2_PIX_FMT_YUV420</constant> - with planes non contiguous in memory. </refpurpose> + <refname id="V4L2-PIX-FMT-YUV420M"><constant>V4L2_PIX_FMT_YUV420M</constant></refname> + <refname id="V4L2-PIX-FMT-YVU420M"><constant>V4L2_PIX_FMT_YVU420M</constant></refname> + <refpurpose>Variation of <constant>V4L2_PIX_FMT_YUV420</constant> and + <constant>V4L2_PIX_FMT_YVU420</constant> with planes non contiguous + in memory.</refpurpose> </refnamediv> <refsect1> <title>Description</title> <para>This is a multi-planar format, as opposed to a packed format. -The three components are separated into three sub- images or planes. +The three components are separated into three sub-images or planes.</para> -The Y plane is first. The Y plane has one byte per pixel. The Cb data + <para>The Y plane is first. The Y plane has one byte per pixel. +For <constant>V4L2_PIX_FMT_YUV420M</constant> the Cb data constitutes the second plane which is half the width and half the height of the Y plane (and of the image). Each Cb belongs to four pixels, a two-by-two square of the image. For example, Cb<subscript>0</subscript> belongs to Y'<subscript>00</subscript>, Y'<subscript>01</subscript>, Y'<subscript>10</subscript>, and Y'<subscript>11</subscript>. The Cr data, just like the Cb plane, is -in the third plane. </para> +in the third plane.</para> + + <para><constant>V4L2_PIX_FMT_YVU420M</constant> is the same except +the Cr data is stored in the second plane and the Cb data in the third plane. +</para> <para>If the Y plane has pad bytes after each row, then the Cb and Cr planes have half as many pad bytes after their rows. In other words, two Cx rows (including padding) is exactly as long as one Y row (including padding).</para> - <para><constant>V4L2_PIX_FMT_YUV420M</constant> is intended to be + <para><constant>V4L2_PIX_FMT_YUV420M</constant> and +<constant>V4L2_PIX_FMT_YVU420M</constant> are intended to be used only in drivers and applications that support the multi-planar API, described in <xref linkend="planar-apis"/>. 
</para> diff --git a/Documentation/DocBook/media/v4l/pixfmt-yvu420m.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv422m.xml index 2330667907c7..dd502802cb75 100644 --- a/Documentation/DocBook/media/v4l/pixfmt-yvu420m.xml +++ b/Documentation/DocBook/media/v4l/pixfmt-yuv422m.xml @@ -1,40 +1,45 @@ - <refentry id="V4L2-PIX-FMT-YVU420M"> + <refentry> <refmeta> - <refentrytitle>V4L2_PIX_FMT_YVU420M ('YM21')</refentrytitle> + <refentrytitle>V4L2_PIX_FMT_YUV422M ('YM16'), V4L2_PIX_FMT_YVU422M ('YM61')</refentrytitle> &manvol; </refmeta> <refnamediv> - <refname> <constant>V4L2_PIX_FMT_YVU420M</constant></refname> - <refpurpose>Variation of <constant>V4L2_PIX_FMT_YVU420</constant> - with planes non contiguous in memory. </refpurpose> + <refname id="V4L2-PIX-FMT-YUV422M"><constant>V4L2_PIX_FMT_YUV422M</constant></refname> + <refname id="V4L2-PIX-FMT-YVU422M"><constant>V4L2_PIX_FMT_YVU422M</constant></refname> + <refpurpose>Planar formats with ½ horizontal resolution, also + known as YUV and YVU 4:2:2</refpurpose> </refnamediv> <refsect1> <title>Description</title> <para>This is a multi-planar format, as opposed to a packed format. -The three components are separated into three sub-images or planes. +The three components are separated into three sub-images or planes.</para> -The Y plane is first. The Y plane has one byte per pixel. The Cr data -constitutes the second plane which is half the width and half -the height of the Y plane (and of the image). Each Cr belongs to four -pixels, a two-by-two square of the image. For example, -Cr<subscript>0</subscript> belongs to Y'<subscript>00</subscript>, -Y'<subscript>01</subscript>, Y'<subscript>10</subscript>, and -Y'<subscript>11</subscript>. The Cb data, just like the Cr plane, constitutes -the third plane. </para> + <para>The Y plane is first. The Y plane has one byte per pixel. +For <constant>V4L2_PIX_FMT_YUV422M</constant> the Cb data +constitutes the second plane which is half the width of the Y plane (and of the +image). Each Cb belongs to two pixels. For example, +Cb<subscript>0</subscript> belongs to Y'<subscript>00</subscript>, +Y'<subscript>01</subscript>. The Cr data, just like the Cb plane, is +in the third plane. </para> - <para>If the Y plane has pad bytes after each row, then the Cr -and Cb planes have half as many pad bytes after their rows. In other + <para><constant>V4L2_PIX_FMT_YVU422M</constant> is the same except +the Cr data is stored in the second plane and the Cb data in the third plane. +</para> + + <para>If the Y plane has pad bytes after each row, then the Cb +and Cr planes have half as many pad bytes after their rows. In other words, two Cx rows (including padding) is exactly as long as one Y row (including padding).</para> - <para><constant>V4L2_PIX_FMT_YVU420M</constant> is intended to be + <para><constant>V4L2_PIX_FMT_YUV422M</constant> and +<constant>V4L2_PIX_FMT_YVU422M</constant> are intended to be used only in drivers and applications that support the multi-planar API, described in <xref linkend="planar-apis"/>. 
</para> <example> - <title><constant>V4L2_PIX_FMT_YVU420M</constant> 4 × 4 + <title><constant>V4L2_PIX_FMT_YUV422M</constant> 4 × 4 pixel image</title> <formalpara> @@ -75,24 +80,44 @@ pixel image</title> <row><entry></entry></row> <row> <entry>start1 + 0:</entry> - <entry>Cr<subscript>00</subscript></entry> - <entry>Cr<subscript>01</subscript></entry> + <entry>Cb<subscript>00</subscript></entry> + <entry>Cb<subscript>01</subscript></entry> </row> <row> <entry>start1 + 2:</entry> - <entry>Cr<subscript>10</subscript></entry> - <entry>Cr<subscript>11</subscript></entry> + <entry>Cb<subscript>10</subscript></entry> + <entry>Cb<subscript>11</subscript></entry> + </row> + <row> + <entry>start1 + 4:</entry> + <entry>Cb<subscript>20</subscript></entry> + <entry>Cb<subscript>21</subscript></entry> + </row> + <row> + <entry>start1 + 6:</entry> + <entry>Cb<subscript>30</subscript></entry> + <entry>Cb<subscript>31</subscript></entry> </row> <row><entry></entry></row> <row> <entry>start2 + 0:</entry> - <entry>Cb<subscript>00</subscript></entry> - <entry>Cb<subscript>01</subscript></entry> + <entry>Cr<subscript>00</subscript></entry> + <entry>Cr<subscript>01</subscript></entry> </row> <row> <entry>start2 + 2:</entry> - <entry>Cb<subscript>10</subscript></entry> - <entry>Cb<subscript>11</subscript></entry> + <entry>Cr<subscript>10</subscript></entry> + <entry>Cr<subscript>11</subscript></entry> + </row> + <row> + <entry>start2 + 4:</entry> + <entry>Cr<subscript>20</subscript></entry> + <entry>Cr<subscript>21</subscript></entry> + </row> + <row> + <entry>start2 + 6:</entry> + <entry>Cr<subscript>30</subscript></entry> + <entry>Cr<subscript>31</subscript></entry> </row> </tbody> </tgroup> @@ -113,36 +138,23 @@ pixel image</title> </row> <row> <entry>0</entry> - <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry> - <entry>Y</entry><entry></entry><entry>Y</entry> - </row> - <row> - <entry></entry> - <entry></entry><entry>C</entry><entry></entry><entry></entry> - <entry></entry><entry>C</entry><entry></entry> + <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry> + <entry>Y</entry><entry>C</entry><entry>Y</entry> </row> <row> <entry>1</entry> - <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry> - <entry>Y</entry><entry></entry><entry>Y</entry> - </row> - <row> - <entry></entry> + <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry> + <entry>Y</entry><entry>C</entry><entry>Y</entry> </row> <row> <entry>2</entry> - <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry> - <entry>Y</entry><entry></entry><entry>Y</entry> - </row> - <row> - <entry></entry> - <entry></entry><entry>C</entry><entry></entry><entry></entry> - <entry></entry><entry>C</entry><entry></entry> + <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry> + <entry>Y</entry><entry>C</entry><entry>Y</entry> </row> <row> <entry>3</entry> - <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry> - <entry>Y</entry><entry></entry><entry>Y</entry> + <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry> + <entry>Y</entry><entry>C</entry><entry>Y</entry> </row> </tbody> </tgroup> diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv444m.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv444m.xml new file mode 100644 index 000000000000..1b7335940bc7 --- /dev/null +++ b/Documentation/DocBook/media/v4l/pixfmt-yuv444m.xml @@ -0,0 +1,177 @@ + <refentry> + <refmeta> + <refentrytitle>V4L2_PIX_FMT_YUV444M ('YM24'), V4L2_PIX_FMT_YVU444M ('YM42')</refentrytitle> + &manvol; + </refmeta> + 
<refnamediv> + <refname id="V4L2-PIX-FMT-YUV444M"><constant>V4L2_PIX_FMT_YUV444M</constant></refname> + <refname id="V4L2-PIX-FMT-YVU444M"><constant>V4L2_PIX_FMT_YVU444M</constant></refname> + <refpurpose>Planar formats with full horizontal resolution, also + known as YUV and YVU 4:4:4</refpurpose> + </refnamediv> + + <refsect1> + <title>Description</title> + + <para>This is a multi-planar format, as opposed to a packed format. +The three components are separated into three sub-images or planes.</para> + + <para>The Y plane is first. The Y plane has one byte per pixel. +For <constant>V4L2_PIX_FMT_YUV444M</constant> the Cb data +constitutes the second plane which is the same width and height as the Y plane +(and as the image). The Cr data, just like the Cb plane, is in the third plane. +</para> + + <para><constant>V4L2_PIX_FMT_YVU444M</constant> is the same except +the Cr data is stored in the second plane and the Cb data in the third plane. +</para> + <para>If the Y plane has pad bytes after each row, then the Cb +and Cr planes have the same number of pad bytes after their rows.</para> + + <para><constant>V4L2_PIX_FMT_YUV444M</constant> and +<constant>V4L2_PIX_FMT_YUV444M</constant> are intended to be +used only in drivers and applications that support the multi-planar API, +described in <xref linkend="planar-apis"/>. </para> + + <example> + <title><constant>V4L2_PIX_FMT_YUV444M</constant> 4 × 4 +pixel image</title> + + <formalpara> + <title>Byte Order.</title> + <para>Each cell is one byte. + <informaltable frame="none"> + <tgroup cols="5" align="center"> + <colspec align="left" colwidth="2*" /> + <tbody valign="top"> + <row> + <entry>start0 + 0:</entry> + <entry>Y'<subscript>00</subscript></entry> + <entry>Y'<subscript>01</subscript></entry> + <entry>Y'<subscript>02</subscript></entry> + <entry>Y'<subscript>03</subscript></entry> + </row> + <row> + <entry>start0 + 4:</entry> + <entry>Y'<subscript>10</subscript></entry> + <entry>Y'<subscript>11</subscript></entry> + <entry>Y'<subscript>12</subscript></entry> + <entry>Y'<subscript>13</subscript></entry> + </row> + <row> + <entry>start0 + 8:</entry> + <entry>Y'<subscript>20</subscript></entry> + <entry>Y'<subscript>21</subscript></entry> + <entry>Y'<subscript>22</subscript></entry> + <entry>Y'<subscript>23</subscript></entry> + </row> + <row> + <entry>start0 + 12:</entry> + <entry>Y'<subscript>30</subscript></entry> + <entry>Y'<subscript>31</subscript></entry> + <entry>Y'<subscript>32</subscript></entry> + <entry>Y'<subscript>33</subscript></entry> + </row> + <row><entry></entry></row> + <row> + <entry>start1 + 0:</entry> + <entry>Cb<subscript>00</subscript></entry> + <entry>Cb<subscript>01</subscript></entry> + <entry>Cb<subscript>02</subscript></entry> + <entry>Cb<subscript>03</subscript></entry> + </row> + <row> + <entry>start1 + 4:</entry> + <entry>Cb<subscript>10</subscript></entry> + <entry>Cb<subscript>11</subscript></entry> + <entry>Cb<subscript>12</subscript></entry> + <entry>Cb<subscript>13</subscript></entry> + </row> + <row> + <entry>start1 + 8:</entry> + <entry>Cb<subscript>20</subscript></entry> + <entry>Cb<subscript>21</subscript></entry> + <entry>Cb<subscript>22</subscript></entry> + <entry>Cb<subscript>23</subscript></entry> + </row> + <row> + <entry>start1 + 12:</entry> + <entry>Cb<subscript>20</subscript></entry> + <entry>Cb<subscript>21</subscript></entry> + <entry>Cb<subscript>32</subscript></entry> + <entry>Cb<subscript>33</subscript></entry> + </row> + <row><entry></entry></row> + <row> + <entry>start2 + 0:</entry> + 
<entry>Cr<subscript>00</subscript></entry> + <entry>Cr<subscript>01</subscript></entry> + <entry>Cr<subscript>02</subscript></entry> + <entry>Cr<subscript>03</subscript></entry> + </row> + <row> + <entry>start2 + 4:</entry> + <entry>Cr<subscript>10</subscript></entry> + <entry>Cr<subscript>11</subscript></entry> + <entry>Cr<subscript>12</subscript></entry> + <entry>Cr<subscript>13</subscript></entry> + </row> + <row> + <entry>start2 + 8:</entry> + <entry>Cr<subscript>20</subscript></entry> + <entry>Cr<subscript>21</subscript></entry> + <entry>Cr<subscript>22</subscript></entry> + <entry>Cr<subscript>23</subscript></entry> + </row> + <row> + <entry>start2 + 12:</entry> + <entry>Cr<subscript>30</subscript></entry> + <entry>Cr<subscript>31</subscript></entry> + <entry>Cr<subscript>32</subscript></entry> + <entry>Cr<subscript>33</subscript></entry> + </row> + </tbody> + </tgroup> + </informaltable> + </para> + </formalpara> + + <formalpara> + <title>Color Sample Location.</title> + <para> + <informaltable frame="none"> + <tgroup cols="7" align="center"> + <tbody valign="top"> + <row> + <entry></entry> + <entry>0</entry><entry></entry><entry>1</entry><entry></entry> + <entry>2</entry><entry></entry><entry>3</entry> + </row> + <row> + <entry>0</entry> + <entry>YC</entry><entry></entry><entry>YC</entry><entry></entry> + <entry>YC</entry><entry></entry><entry>YC</entry> + </row> + <row> + <entry>1</entry> + <entry>YC</entry><entry></entry><entry>YC</entry><entry></entry> + <entry>YC</entry><entry></entry><entry>YC</entry> + </row> + <row> + <entry>2</entry> + <entry>YC</entry><entry></entry><entry>YC</entry><entry></entry> + <entry>YC</entry><entry></entry><entry>YC</entry> + </row> + <row> + <entry>3</entry> + <entry>YC</entry><entry></entry><entry>YC</entry><entry></entry> + <entry>YC</entry><entry></entry><entry>YC</entry> + </row> + </tbody> + </tgroup> + </informaltable> + </para> + </formalpara> + </example> + </refsect1> + </refentry> diff --git a/Documentation/DocBook/media/v4l/pixfmt.xml b/Documentation/DocBook/media/v4l/pixfmt.xml index d871245d2973..2f02f9441443 100644 --- a/Documentation/DocBook/media/v4l/pixfmt.xml +++ b/Documentation/DocBook/media/v4l/pixfmt.xml @@ -1628,7 +1628,8 @@ information.</para> &sub-y41p; &sub-yuv420; &sub-yuv420m; - &sub-yvu420m; + &sub-yuv422m; + &sub-yuv444m; &sub-yuv410; &sub-yuv422p; &sub-yuv411p; diff --git a/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml b/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml index e9c70a8f3476..0c93677d16b4 100644 --- a/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml +++ b/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml @@ -60,9 +60,19 @@ input</refpurpose> automatically, similar to sensing the video standard. To do so, applications call <constant>VIDIOC_QUERY_DV_TIMINGS</constant> with a pointer to a &v4l2-dv-timings;. Once the hardware detects the timings, it will fill in the -timings structure. +timings structure.</para> -If the timings could not be detected because there was no signal, then +<para>Please note that drivers shall <emphasis>not</emphasis> switch timings automatically +if new timings are detected. Instead, drivers should send the +<constant>V4L2_EVENT_SOURCE_CHANGE</constant> event (if they support this) and expect +that userspace will take action by calling <constant>VIDIOC_QUERY_DV_TIMINGS</constant>. +The reason is that new timings usually mean different buffer sizes as well, and you +cannot change buffer sizes on the fly. 
In general, applications that receive the +Source Change event will have to call <constant>VIDIOC_QUERY_DV_TIMINGS</constant>, +and if the detected timings are valid they will have to stop streaming, set the new +timings, allocate new buffers and start streaming again.</para> + +<para>If the timings could not be detected because there was no signal, then <errorcode>ENOLINK</errorcode> is returned. If a signal was detected, but it was unstable and the receiver could not lock to the signal, then <errorcode>ENOLCK</errorcode> is returned. If the receiver could lock to the signal, diff --git a/Documentation/DocBook/media/v4l/vidioc-querystd.xml b/Documentation/DocBook/media/v4l/vidioc-querystd.xml index 222348542182..3ceae35fab03 100644 --- a/Documentation/DocBook/media/v4l/vidioc-querystd.xml +++ b/Documentation/DocBook/media/v4l/vidioc-querystd.xml @@ -59,6 +59,16 @@ then the driver will return V4L2_STD_UNKNOWN. When detection is not possible or fails, the set must contain all standards supported by the current video input or output.</para> +<para>Please note that drivers shall <emphasis>not</emphasis> switch the video standard +automatically if a new video standard is detected. Instead, drivers should send the +<constant>V4L2_EVENT_SOURCE_CHANGE</constant> event (if they support this) and expect +that userspace will take action by calling <constant>VIDIOC_QUERYSTD</constant>. +The reason is that a new video standard can mean different buffer sizes as well, and you +cannot change buffer sizes on the fly. In general, applications that receive the +Source Change event will have to call <constant>VIDIOC_QUERYSTD</constant>, +and if the detected video standard is valid they will have to stop streaming, set the new +standard, allocate new buffers and start streaming again.</para> + </refsect1> <refsect1> diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt index 0e6f0c024858..22756b3dede2 100644 --- a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt +++ b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt @@ -6,6 +6,7 @@ Required properties: "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */ "samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */ "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */ + "samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */ "samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */ - reg: physical base address and length of the registers set for the device - interrupts: should contain DSI interrupt diff --git a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt index 27c3ce0db16a..c7c6b9af87ac 100644 --- a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt +++ b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt @@ -12,7 +12,8 @@ Required properties: "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */ "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */ "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */ - "samsung,exynos5250-fimd"; /* for Exynos5 SoCs */ + "samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */ + "samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */ - reg: physical base address and length of the FIMD registers set. 
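Editor's note (not part of the patches above): the VIDIOC_QUERY_DV_TIMINGS and VIDIOC_QUERYSTD documentation hunks a few files up describe a userspace sequence — react to V4L2_EVENT_SOURCE_CHANGE, stop streaming, query and set the new timings, reallocate buffers, restart streaming. A minimal C sketch of that sequence follows; it assumes the fd has already subscribed to the event with VIDIOC_SUBSCRIBE_EVENT, and it omits buffer mmap()/QBUF handling and most error reporting. All ioctls and structures are standard V4L2 uAPI from <linux/videodev2.h>.

```c
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int handle_source_change(int fd, unsigned int nbufs)
{
	struct v4l2_event ev;
	struct v4l2_dv_timings timings;
	struct v4l2_requestbuffers reqbufs;
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	/* Drain the source change event that woke us up. */
	if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0 ||
	    ev.type != V4L2_EVENT_SOURCE_CHANGE)
		return -1;

	/* The driver must not switch timings itself, so userspace does it. */
	if (ioctl(fd, VIDIOC_STREAMOFF, &type) < 0)
		return -1;

	memset(&timings, 0, sizeof(timings));
	if (ioctl(fd, VIDIOC_QUERY_DV_TIMINGS, &timings) < 0)
		return -1;	/* e.g. ENOLINK/ENOLCK: no stable signal yet */

	if (ioctl(fd, VIDIOC_S_DV_TIMINGS, &timings) < 0)
		return -1;

	/* New timings usually mean new buffer sizes: free and reallocate. */
	memset(&reqbufs, 0, sizeof(reqbufs));
	reqbufs.type = type;
	reqbufs.memory = V4L2_MEMORY_MMAP;
	reqbufs.count = 0;
	if (ioctl(fd, VIDIOC_REQBUFS, &reqbufs) < 0)
		return -1;
	reqbufs.count = nbufs;
	if (ioctl(fd, VIDIOC_REQBUFS, &reqbufs) < 0)
		return -1;

	/* ...mmap() and VIDIOC_QBUF the new buffers here... */

	return ioctl(fd, VIDIOC_STREAMON, &type);
}
```

The same shape applies to analog inputs, with VIDIOC_QUERYSTD/VIDIOC_S_STD in place of the DV timings ioctls.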
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt index e7423bea1424..f5948c48b9a2 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi.txt +++ b/Documentation/devicetree/bindings/display/msm/dsi.txt @@ -44,9 +44,34 @@ Optional properties: - pinctrl-names: the pin control state names; should contain "default" - pinctrl-0: the default pinctrl state (active) - pinctrl-n: the "sleep" pinctrl state -- port: DSI controller output port. This contains one endpoint subnode, with its - remote-endpoint set to the phandle of the connected panel's endpoint. - See Documentation/devicetree/bindings/graph.txt for device graph info. +- port: DSI controller output port, containing one endpoint subnode. + + DSI Endpoint properties: + - remote-endpoint: set to phandle of the connected panel's endpoint. + See Documentation/devicetree/bindings/graph.txt for device graph info. + - qcom,data-lane-map: this describes how the logical DSI lanes are mapped + to the physical lanes on the given platform. The value contained in + index n describes what logical data lane is mapped to the physical data + lane n (DATAn, where n lies between 0 and 3). + + For example: + + qcom,data-lane-map = <3 0 1 2>; + + The above mapping describes that the logical data lane DATA3 is mapped to + the physical data lane DATA0, logical DATA0 to physical DATA1, logic DATA1 + to phys DATA2 and logic DATA2 to phys DATA3. + + There are only a limited number of physical to logical mappings possible: + + "0123": Logic 0->Phys 0; Logic 1->Phys 1; Logic 2->Phys 2; Logic 3->Phys 3; + "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3; + "2301": Logic 2->Phys 0; Logic 3->Phys 1; Logic 0->Phys 2; Logic 1->Phys 3; + "1230": Logic 1->Phys 0; Logic 2->Phys 1; Logic 3->Phys 2; Logic 0->Phys 3; + "0321": Logic 0->Phys 0; Logic 3->Phys 1; Logic 2->Phys 2; Logic 1->Phys 3; + "1032": Logic 1->Phys 0; Logic 0->Phys 1; Logic 3->Phys 2; Logic 2->Phys 3; + "2103": Logic 2->Phys 0; Logic 1->Phys 1; Logic 0->Phys 2; Logic 3->Phys 3; + "3210": Logic 3->Phys 0; Logic 2->Phys 1; Logic 1->Phys 2; Logic 0->Phys 3; DSI PHY: Required properties: @@ -131,6 +156,7 @@ Example: port { dsi0_out: endpoint { remote-endpoint = <&panel_in>; + lanes = <0 1 2 3>; }; }; }; diff --git a/Documentation/devicetree/bindings/display/msm/hdmi.txt b/Documentation/devicetree/bindings/display/msm/hdmi.txt index 379ee2ea9a3d..b63f614e0c04 100644 --- a/Documentation/devicetree/bindings/display/msm/hdmi.txt +++ b/Documentation/devicetree/bindings/display/msm/hdmi.txt @@ -11,6 +11,7 @@ Required properties: - reg: Physical base address and length of the controller's registers - reg-names: "core_physical" - interrupts: The interrupt signal from the hdmi block. +- power-domains: Should be <&mmcc MDSS_GDSC>. - clocks: device clocks See ../clocks/clock-bindings.txt for details. 
- qcom,hdmi-tx-ddc-clk-gpio: ddc clk pin @@ -18,6 +19,8 @@ Required properties: - qcom,hdmi-tx-hpd-gpio: hpd pin - core-vdda-supply: phandle to supply regulator - hdmi-mux-supply: phandle to mux regulator +- phys: the phandle for the HDMI PHY device +- phy-names: the name of the corresponding PHY device Optional properties: - qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin @@ -27,15 +30,38 @@ Optional properties: - pinctrl-0: the default pinctrl state (active) - pinctrl-1: the "sleep" pinctrl state +HDMI PHY: +Required properties: +- compatible: Could be the following + * "qcom,hdmi-phy-8660" + * "qcom,hdmi-phy-8960" + * "qcom,hdmi-phy-8974" + * "qcom,hdmi-phy-8084" + * "qcom,hdmi-phy-8996" +- #phy-cells: Number of cells in a PHY specifier; Should be 0. +- reg: Physical base address and length of the registers of the PHY sub blocks. +- reg-names: The names of register regions. The following regions are required: + * "hdmi_phy" + * "hdmi_pll" + For HDMI PHY on msm8996, these additional register regions are required: + * "hdmi_tx_l0" + * "hdmi_tx_l1" + * "hdmi_tx_l3" + * "hdmi_tx_l4" +- power-domains: Should be <&mmcc MDSS_GDSC>. +- clocks: device clocks + See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details. +- core-vdda-supply: phandle to vdda regulator device node + Example: / { ... - hdmi: qcom,hdmi-tx-8960@4a00000 { + hdmi: hdmi@4a00000 { compatible = "qcom,hdmi-tx-8960"; reg-names = "core_physical"; - reg = <0x04a00000 0x1000>; + reg = <0x04a00000 0x2f0>; interrupts = <GIC_SPI 79 0>; power-domains = <&mmcc MDSS_GDSC>; clock-names = @@ -54,5 +80,21 @@ Example: pinctrl-names = "default", "sleep"; pinctrl-0 = <&hpd_active &ddc_active &cec_active>; pinctrl-1 = <&hpd_suspend &ddc_suspend &cec_suspend>; + + phys = <&hdmi_phy>; + phy-names = "hdmi_phy"; + }; + + hdmi_phy: phy@4a00400 { + compatible = "qcom,hdmi-phy-8960"; + reg-names = "hdmi_phy", + "hdmi_pll"; + reg = <0x4a00400 0x60>, + <0x4a00500 0x100>; + #phy-cells = <0>; + power-domains = <&mmcc MDSS_GDSC>; + clock-names = "slave_iface_clk"; + clocks = <&mmcc HDMI_S_AHB_CLK>; + core-vdda-supply = <&pm8921_hdmi_mvs>; }; }; diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt index eccd4f4867b2..0d30e42e40be 100644 --- a/Documentation/devicetree/bindings/display/renesas,du.txt +++ b/Documentation/devicetree/bindings/display/renesas,du.txt @@ -8,6 +8,7 @@ Required Properties: - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU - "renesas,du-r8a7793" for R8A7793 (R-Car M2-N) compatible DU - "renesas,du-r8a7794" for R8A7794 (R-Car E2) compatible DU + - "renesas,du-r8a7795" for R8A7795 (R-Car H3) compatible DU - reg: A list of base address and length of each memory resource, one for each entry in the reg-names property. @@ -24,7 +25,7 @@ Required Properties: - clock-names: Name of the clocks. This property is model-dependent. - R8A7779 uses a single functional clock. The clock doesn't need to be named. - - R8A779[0134] use one functional clock per channel and one clock per LVDS + - R8A779[01345] use one functional clock per channel and one clock per LVDS encoder (if available). The functional clocks must be named "du.x" with "x" being the channel numerical index. The LVDS clocks must be named "lvds.x" with "x" being the LVDS encoder numerical index. @@ -41,13 +42,14 @@ bindings specified in Documentation/devicetree/bindings/graph.txt. The following table lists for each supported model the port number corresponding to each DU output. 
- Port 0 Port1 Port2 + Port 0 Port1 Port2 Port3 ----------------------------------------------------------------------------- - R8A7779 (H1) DPAD 0 DPAD 1 - - R8A7790 (H2) DPAD LVDS 0 LVDS 1 - R8A7791 (M2-W) DPAD LVDS 0 - - R8A7793 (M2-N) DPAD LVDS 0 - - R8A7794 (E2) DPAD 0 DPAD 1 - + R8A7779 (H1) DPAD 0 DPAD 1 - - + R8A7790 (H2) DPAD LVDS 0 LVDS 1 - + R8A7791 (M2-W) DPAD LVDS 0 - - + R8A7793 (M2-N) DPAD LVDS 0 - - + R8A7794 (E2) DPAD 0 DPAD 1 - - + R8A7795 (H3) DPAD HDMI 0 HDMI 1 LVDS Example: R8A7790 (R-Car H2) DU diff --git a/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt new file mode 100644 index 000000000000..8096a29f9776 --- /dev/null +++ b/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt @@ -0,0 +1,50 @@ +Rockchip specific extensions to the Innosilicon HDMI +================================ + +Required properties: +- compatible: + "rockchip,rk3036-inno-hdmi"; +- reg: + Physical base address and length of the controller's registers. +- clocks, clock-names: + Phandle to hdmi controller clock, name should be "pclk" +- interrupts: + HDMI interrupt number +- ports: + Contain one port node with endpoint definitions as defined in + Documentation/devicetree/bindings/graph.txt. +- pinctrl-0, pinctrl-name: + Switch the iomux of HPD/CEC pins to HDMI function. + +Example: +hdmi: hdmi@20034000 { + compatible = "rockchip,rk3036-inno-hdmi"; + reg = <0x20034000 0x4000>; + interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru PCLK_HDMI>; + clock-names = "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&hdmi_ctl>; + status = "disabled"; + + hdmi_in: port { + #address-cells = <1>; + #size-cells = <0>; + hdmi_in_lcdc: endpoint@0 { + reg = <0>; + remote-endpoint = <&lcdc_out_hdmi>; + }; + }; +}; + +&pinctrl { + hdmi { + hdmi_ctl: hdmi-ctl { + rockchip,pins = <1 8 RK_FUNC_1 &pcfg_pull_none>, + <1 9 RK_FUNC_1 &pcfg_pull_none>, + <1 10 RK_FUNC_1 &pcfg_pull_none>, + <1 11 RK_FUNC_1 &pcfg_pull_none>; + }; + }; + +}; diff --git a/Documentation/devicetree/bindings/media/i2c/mt9v032.txt b/Documentation/devicetree/bindings/media/i2c/mt9v032.txt index 202565313e82..100f0ae43269 100644 --- a/Documentation/devicetree/bindings/media/i2c/mt9v032.txt +++ b/Documentation/devicetree/bindings/media/i2c/mt9v032.txt @@ -20,6 +20,8 @@ Optional Properties: - link-frequencies: List of allowed link frequencies in Hz. Each frequency is expressed as a 64-bit big-endian integer. +- reset-gpios: GPIO handle which is connected to the reset pin of the chip. +- standby-gpios: GPIO handle which is connected to the standby pin of the chip. For further reading on port node refer to Documentation/devicetree/bindings/media/video-interfaces.txt. diff --git a/Documentation/devicetree/bindings/media/i2c/tvp5150.txt b/Documentation/devicetree/bindings/media/i2c/tvp5150.txt new file mode 100644 index 000000000000..daa20e43a8e3 --- /dev/null +++ b/Documentation/devicetree/bindings/media/i2c/tvp5150.txt @@ -0,0 +1,88 @@ +* Texas Instruments TVP5150 and TVP5151 video decoders + +The TVP5150 and TVP5151 are video decoders that convert baseband NTSC and PAL +(and also SECAM in the TVP5151 case) video signals to either 8-bit 4:2:2 YUV +with discrete syncs or 8-bit ITU-R BT.656 with embedded syncs output formats. + +Required Properties: +- compatible: value must be "ti,tvp5150" +- reg: I2C slave address + +Optional Properties: +- pdn-gpios: phandle for the GPIO connected to the PDN pin, if any. 
+- reset-gpios: phandle for the GPIO connected to the RESETB pin, if any. + +Optional nodes: +- connectors: The input connectors of tvp5150 have to be defined under + a subnode name "connectors" using the following format: + + input-connector-name { + input connector properties + }; + +Each input connector must contain the following properties: + + - label: a name for the connector. + - input: the input connector. + +The possible values for the "input" property are: + 0: Composite0 + 1: Composite1 + 2: S-Video + +and on a tvp5150am1 and tvp5151 there is another: + 4: Signal generator + +The list of valid input connectors are defined in dt-bindings/media/tvp5150.h +header file and can be included by device tree source files. + +Each input connector can be defined only once. + +The device node must contain one 'port' child node for its digital output +video port, in accordance with the video interface bindings defined in +Documentation/devicetree/bindings/media/video-interfaces.txt. + +Required Endpoint Properties for parallel synchronization: + +- hsync-active: active state of the HSYNC signal. Must be <1> (HIGH). +- vsync-active: active state of the VSYNC signal. Must be <1> (HIGH). +- field-even-active: field signal level during the even field data + transmission. Must be <0>. + +If none of hsync-active, vsync-active and field-even-active is specified, +the endpoint is assumed to use embedded BT.656 synchronization. + +Example: + +&i2c2 { + ... + tvp5150@5c { + compatible = "ti,tvp5150"; + reg = <0x5c>; + pdn-gpios = <&gpio4 30 GPIO_ACTIVE_LOW>; + reset-gpios = <&gpio6 7 GPIO_ACTIVE_LOW>; + + connectors { + composite0 { + label = "Composite0"; + input = <TVP5150_COMPOSITE0>; + }; + + composite1 { + label = "Composite1"; + input = <TVP5150_COMPOSITE1>; + }; + + s-video { + label = "S-Video"; + input = <TVP5150_SVIDEO>; + }; + }; + + port { + tvp5150_1: endpoint { + remote-endpoint = <&ccdc_ep>; + }; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt index 9dafe6b06cd2..619193ccf7ff 100644 --- a/Documentation/devicetree/bindings/media/rcar_vin.txt +++ b/Documentation/devicetree/bindings/media/rcar_vin.txt @@ -6,6 +6,7 @@ family of devices. The current blocks are always slaves and suppot one input channel which can be either RGB, YUYV or BT656. - compatible: Must be one of the following + - "renesas,vin-r8a7795" for the R8A7795 device - "renesas,vin-r8a7794" for the R8A7794 device - "renesas,vin-r8a7793" for the R8A7793 device - "renesas,vin-r8a7791" for the R8A7791 device diff --git a/Documentation/devicetree/bindings/media/renesas,jpu.txt b/Documentation/devicetree/bindings/media/renesas,jpu.txt index 0cb94201bf92..d3436e5190f9 100644 --- a/Documentation/devicetree/bindings/media/renesas,jpu.txt +++ b/Documentation/devicetree/bindings/media/renesas,jpu.txt @@ -5,11 +5,12 @@ and decoding function conforming to the JPEG baseline process, so that the JPU can encode image data and decode JPEG data quickly. Required properties: - - compatible: should containg one of the following: - - "renesas,jpu-r8a7790" for R-Car H2 - - "renesas,jpu-r8a7791" for R-Car M2-W - - "renesas,jpu-r8a7792" for R-Car V2H - - "renesas,jpu-r8a7793" for R-Car M2-N +- compatible: "renesas,jpu-<soctype>", "renesas,rcar-gen2-jpu" as fallback. 
+ Examples with soctypes are: + - "renesas,jpu-r8a7790" for R-Car H2 + - "renesas,jpu-r8a7791" for R-Car M2-W + - "renesas,jpu-r8a7792" for R-Car V2H + - "renesas,jpu-r8a7793" for R-Car M2-N - reg: Base address and length of the registers block for the JPU. - interrupts: JPU interrupt specifier. @@ -17,7 +18,7 @@ Required properties: Example: R8A7790 (R-Car H2) JPU node jpeg-codec@fe980000 { - compatible = "renesas,jpu-r8a7790"; + compatible = "renesas,jpu-r8a7790", "renesas,rcar-gen2-jpu"; reg = <0 0xfe980000 0 0x10300>; interrupts = <0 272 IRQ_TYPE_LEVEL_HIGH>; clocks = <&mstp1_clks R8A7790_CLK_JPU>; diff --git a/Documentation/devicetree/bindings/media/renesas,vsp1.txt b/Documentation/devicetree/bindings/media/renesas,vsp1.txt index 87fe08abf36d..627405abd144 100644 --- a/Documentation/devicetree/bindings/media/renesas,vsp1.txt +++ b/Documentation/devicetree/bindings/media/renesas,vsp1.txt @@ -1,30 +1,18 @@ -* Renesas VSP1 Video Processing Engine +* Renesas VSP Video Processing Engine -The VSP1 is a video processing engine that supports up-/down-scaling, alpha +The VSP is a video processing engine that supports up-/down-scaling, alpha blending, color space conversion and various other image processing features. It can be found in the Renesas R-Car second generation SoCs. Required properties: - - compatible: Must contain "renesas,vsp1" + - compatible: Must contain one of the following values + - "renesas,vsp1" for the R-Car Gen2 VSP1 + - "renesas,vsp2" for the R-Car Gen3 VSP2 - - reg: Base address and length of the registers block for the VSP1. - - interrupts: VSP1 interrupt specifier. - - clocks: A phandle + clock-specifier pair for the VSP1 functional clock. - - - renesas,#rpf: Number of Read Pixel Formatter (RPF) modules in the VSP1. - - renesas,#uds: Number of Up Down Scaler (UDS) modules in the VSP1. - - renesas,#wpf: Number of Write Pixel Formatter (WPF) modules in the VSP1. - - -Optional properties: - - - renesas,has-lif: Boolean, indicates that the LCD Interface (LIF) module is - available. - - renesas,has-lut: Boolean, indicates that the Look Up Table (LUT) module is - available. - - renesas,has-sru: Boolean, indicates that the Super Resolution Unit (SRU) - module is available. + - reg: Base address and length of the registers block for the VSP. + - interrupts: VSP interrupt specifier. + - clocks: A phandle + clock-specifier pair for the VSP functional clock. Example: R8A7790 (R-Car H2) VSP1-S node @@ -34,10 +22,4 @@ Example: R8A7790 (R-Car H2) VSP1-S node reg = <0 0xfe928000 0 0x8000>; interrupts = <0 267 IRQ_TYPE_LEVEL_HIGH>; clocks = <&mstp1_clks R8A7790_CLK_VSP1_S>; - - renesas,has-lut; - renesas,has-sru; - renesas,#rpf = <5>; - renesas,#uds = <3>; - renesas,#wpf = <4>; }; diff --git a/Documentation/devicetree/bindings/media/ti-cal.txt b/Documentation/devicetree/bindings/media/ti-cal.txt new file mode 100644 index 000000000000..ae9b52f37576 --- /dev/null +++ b/Documentation/devicetree/bindings/media/ti-cal.txt @@ -0,0 +1,72 @@ +Texas Instruments DRA72x CAMERA ADAPTATION LAYER (CAL) +------------------------------------------------------ + +The Camera Adaptation Layer (CAL) is a key component for image capture +applications. The capture module provides the system interface and the +processing capability to connect CSI2 image-sensor modules to the +DRA72x device. 
+ +Required properties: +- compatible: must be "ti,dra72-cal" +- reg: CAL Top level, Receiver Core #0, Receiver Core #1 and Camera RX + control address space +- reg-names: cal_top, cal_rx_core0, cal_rx_core1, and camerrx_control + registers +- interrupts: should contain IRQ line for the CAL; + +CAL supports 2 camera port nodes on MIPI bus. Each CSI2 camera port nodes +should contain a 'port' child node with child 'endpoint' node. Please +refer to the bindings defined in +Documentation/devicetree/bindings/media/video-interfaces.txt. + +Example: + cal: cal@4845b000 { + compatible = "ti,dra72-cal"; + ti,hwmods = "cal"; + reg = <0x4845B000 0x400>, + <0x4845B800 0x40>, + <0x4845B900 0x40>, + <0x4A002e94 0x4>; + reg-names = "cal_top", + "cal_rx_core0", + "cal_rx_core1", + "camerrx_control"; + interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + csi2_0: port@0 { + reg = <0>; + endpoint { + slave-mode; + remote-endpoint = <&ar0330_1>; + }; + }; + csi2_1: port@1 { + reg = <1>; + }; + }; + }; + + i2c5: i2c@4807c000 { + ar0330@10 { + compatible = "ti,ar0330"; + reg = <0x10>; + + port { + #address-cells = <1>; + #size-cells = <0>; + + ar0330_1: endpoint { + reg = <0>; + clock-lanes = <1>; + data-lanes = <0 2 3 4>; + remote-endpoint = <&csi2_0>; + }; + }; + }; + }; diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt index 480c8de3c2c4..32ac32e773e1 100644 --- a/Documentation/dma-buf-sharing.txt +++ b/Documentation/dma-buf-sharing.txt @@ -257,17 +257,15 @@ Access to a dma_buf from the kernel context involves three steps: Interface: int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, - size_t start, size_t len, enum dma_data_direction direction) This allows the exporter to ensure that the memory is actually available for cpu access - the exporter might need to allocate or swap-in and pin the backing storage. The exporter also needs to ensure that cpu access is - coherent for the given range and access direction. The range and access - direction can be used by the exporter to optimize the cache flushing, i.e. - access outside of the range or with a different direction (read instead of - write) might return stale or even bogus data (e.g. when the exporter needs to - copy the data to temporary storage). + coherent for the access direction. The direction can be used by the exporter + to optimize the cache flushing, i.e. access with a different direction (read + instead of write) might return stale or even bogus data (e.g. when the + exporter needs to copy the data to temporary storage). This step might fail, e.g. in oom conditions. @@ -322,14 +320,13 @@ Access to a dma_buf from the kernel context involves three steps: 3. Finish access - When the importer is done accessing the range specified in begin_cpu_access, - it needs to announce this to the exporter (to facilitate cache flushing and - unpinning of any pinned resources). The result of any dma_buf kmap calls - after end_cpu_access is undefined. + When the importer is done accessing the CPU, it needs to announce this to + the exporter (to facilitate cache flushing and unpinning of any pinned + resources). The result of any dma_buf kmap calls after end_cpu_access is + undefined. Interface: void dma_buf_end_cpu_access(struct dma_buf *dma_buf, - size_t start, size_t len, enum dma_data_direction dir); @@ -353,7 +350,26 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases: handles, too). 
So it's beneficial to support this in a similar fashion on dma-buf to have a good transition path for existing Android userspace. - No special interfaces, userspace simply calls mmap on the dma-buf fd. + No special interfaces, userspace simply calls mmap on the dma-buf fd, making + sure that the cache synchronization ioctl (DMA_BUF_IOCTL_SYNC) is *always* + used when the access happens. This is discussed next paragraphs. + + Some systems might need some sort of cache coherency management e.g. when + CPU and GPU domains are being accessed through dma-buf at the same time. To + circumvent this problem there are begin/end coherency markers, that forward + directly to existing dma-buf device drivers vfunc hooks. Userspace can make + use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The sequence + would be used like following: + - mmap dma-buf fd + - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write + to mmap area 3. SYNC_END ioctl. This can be repeated as often as you + want (with the new data being consumed by the GPU or say scanout device) + - munmap once you don't need the buffer any more + + Therefore, for correctness and optimal performance, systems with the memory + cache shared by the GPU and CPU i.e. the "coherent" and also the + "incoherent" are always required to use SYNC_START and SYNC_END before and + after, respectively, when accessing the mapped address. 2. Supporting existing mmap interfaces in importers diff --git a/Documentation/dvb/README.dvb-usb b/Documentation/dvb/README.dvb-usb index 669dc6ce4330..6f4b12f7b844 100644 --- a/Documentation/dvb/README.dvb-usb +++ b/Documentation/dvb/README.dvb-usb @@ -190,7 +190,7 @@ and watch another one. Patches, comments and suggestions are very very welcome. 3. Acknowledgements - Amaury Demol (ademol@dibcom.fr) and Francois Kanounnikoff from DiBcom for + Amaury Demol (Amaury.Demol@parrot.com) and Francois Kanounnikoff from DiBcom for providing specs, code and help, on which the dvb-dibusb, dib3000mb and dib3000mc are based. diff --git a/Documentation/video4linux/v4l2-controls.txt b/Documentation/video4linux/v4l2-controls.txt index 5517db602f37..5e759cab4538 100644 --- a/Documentation/video4linux/v4l2-controls.txt +++ b/Documentation/video4linux/v4l2-controls.txt @@ -647,7 +647,6 @@ Or you can add specific controls to a handler: volume = v4l2_ctrl_new_std(&video_ctrl_handler, &ops, V4L2_CID_AUDIO_VOLUME, ...); v4l2_ctrl_new_std(&video_ctrl_handler, &ops, V4L2_CID_BRIGHTNESS, ...); v4l2_ctrl_new_std(&video_ctrl_handler, &ops, V4L2_CID_CONTRAST, ...); - v4l2_ctrl_add_ctrl(&radio_ctrl_handler, volume); What you should not do is make two identical controls for two handlers. 
For example: diff --git a/MAINTAINERS b/MAINTAINERS index 7f1fa4ff300a..ea1d1deaf153 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -827,6 +827,12 @@ S: Maintained F: drivers/net/arcnet/ F: include/uapi/linux/if_arcnet.h +ARM HDLCD DRM DRIVER +M: Liviu Dudau <liviu.dudau@arm.com> +S: Supported +F: drivers/gpu/drm/arm/ +F: Documentation/devicetree/bindings/display/arm,hdlcd.txt + ARM MFM AND FLOPPY DRIVERS M: Ian Molton <spyro@f2s.com> S: Maintained @@ -3758,7 +3764,7 @@ F: include/drm/exynos* F: include/uapi/drm/exynos* DRM DRIVERS FOR FREESCALE DCU -M: Jianwei Wang <jianwei.wang.chn@gmail.com> +M: Stefan Agner <stefan@agner.ch> M: Alison Wang <alison.wang@freescale.com> L: dri-devel@lists.freedesktop.org S: Supported @@ -10849,6 +10855,14 @@ L: linux-omap@vger.kernel.org S: Maintained F: drivers/thermal/ti-soc-thermal/ +TI VPE/CAL DRIVERS +M: Benoit Parrot <bparrot@ti.com> +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +Q: http://patchwork.linuxtv.org/project/linux-media/list/ +S: Maintained +F: drivers/media/platform/ti-vpe/ + TI CDCE706 CLOCK DRIVER M: Max Filippov <jcmvbkbc@gmail.com> S: Maintained diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 1341a94cc779..aef87fdbd187 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -555,8 +555,10 @@ static unsigned int intel_gtt_mappable_entries(void) static void intel_gtt_teardown_scratch_page(void) { set_pages_wb(intel_private.scratch_page, 1); - pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma, - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (intel_private.needs_dmar) + pci_unmap_page(intel_private.pcidev, + intel_private.scratch_page_dma, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); __free_page(intel_private.scratch_page); } @@ -1346,16 +1348,6 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, { int i, mask; - /* - * Can be called from the fake agp driver but also directly from - * drm/i915.ko. Hence we need to check whether everything is set up - * already. - */ - if (intel_private.driver) { - intel_private.refcount++; - return 1; - } - for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { if (gpu_pdev) { if (gpu_pdev->device == @@ -1376,16 +1368,26 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, if (!intel_private.driver) return 0; - intel_private.refcount++; - #if IS_ENABLED(CONFIG_AGP_INTEL) if (bridge) { + if (INTEL_GTT_GEN > 1) + return 0; + bridge->driver = &intel_fake_agp_driver; bridge->dev_private_data = &intel_private; bridge->dev = bridge_pdev; } #endif + + /* + * Can be called from the fake agp driver but also directly from + * drm/i915.ko. Hence we need to check whether everything is set up + * already. 
+ */ + if (intel_private.refcount++) + return 1; + intel_private.bridge_dev = pci_dev_get(bridge_pdev); dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); @@ -1430,6 +1432,8 @@ void intel_gmch_remove(void) if (--intel_private.refcount) return; + if (intel_private.scratch_page) + intel_gtt_teardown_scratch_page(); if (intel_private.pcidev) pci_dev_put(intel_private.pcidev); if (intel_private.bridge_dev) diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 155c1464948e..9810d1df0691 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -34,6 +34,8 @@ #include <linux/poll.h> #include <linux/reservation.h> +#include <uapi/linux/dma-buf.h> + static inline int is_dma_buf_file(struct file *); struct dma_buf_list { @@ -251,11 +253,54 @@ out: return events; } +static long dma_buf_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct dma_buf *dmabuf; + struct dma_buf_sync sync; + enum dma_data_direction direction; + + dmabuf = file->private_data; + + switch (cmd) { + case DMA_BUF_IOCTL_SYNC: + if (copy_from_user(&sync, (void __user *) arg, sizeof(sync))) + return -EFAULT; + + if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK) + return -EINVAL; + + switch (sync.flags & DMA_BUF_SYNC_RW) { + case DMA_BUF_SYNC_READ: + direction = DMA_FROM_DEVICE; + break; + case DMA_BUF_SYNC_WRITE: + direction = DMA_TO_DEVICE; + break; + case DMA_BUF_SYNC_RW: + direction = DMA_BIDIRECTIONAL; + break; + default: + return -EINVAL; + } + + if (sync.flags & DMA_BUF_SYNC_END) + dma_buf_end_cpu_access(dmabuf, direction); + else + dma_buf_begin_cpu_access(dmabuf, direction); + + return 0; + default: + return -ENOTTY; + } +} + static const struct file_operations dma_buf_fops = { .release = dma_buf_release, .mmap = dma_buf_mmap_internal, .llseek = dma_buf_llseek, .poll = dma_buf_poll, + .unlocked_ioctl = dma_buf_ioctl, }; /* @@ -539,13 +584,11 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); * preparations. Coherency is only guaranteed in the specified range for the * specified access direction. * @dmabuf: [in] buffer to prepare cpu access for. - * @start: [in] start of range for cpu access. - * @len: [in] length of range for cpu access. * @direction: [in] length of range for cpu access. * * Can return negative error values, returns 0 on success. */ -int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, +int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { int ret = 0; @@ -554,8 +597,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, return -EINVAL; if (dmabuf->ops->begin_cpu_access) - ret = dmabuf->ops->begin_cpu_access(dmabuf, start, - len, direction); + ret = dmabuf->ops->begin_cpu_access(dmabuf, direction); return ret; } @@ -567,19 +609,17 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); * actions. Coherency is only guaranteed in the specified range for the * specified access direction. * @dmabuf: [in] buffer to complete cpu access for. - * @start: [in] start of range for cpu access. - * @len: [in] length of range for cpu access. * @direction: [in] length of range for cpu access. * * This call must always succeed. 
*/ -void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, +void dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { WARN_ON(!dmabuf); if (dmabuf->ops->end_cpu_access) - dmabuf->ops->end_cpu_access(dmabuf, start, len, direction); + dmabuf->ops->end_cpu_access(dmabuf, direction); } EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 8ae7ab68cb97..f2a74d0b68ae 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -25,6 +25,14 @@ config DRM_MIPI_DSI bool depends on DRM +config DRM_DP_AUX_CHARDEV + bool "DRM DP AUX Interface" + depends on DRM + help + Choose this option to enable a /dev/drm_dp_auxN node that allows to + read and write values to arbitrary DPCD registers on the DP aux + channel. + config DRM_KMS_HELPER tristate depends on DRM @@ -106,6 +114,8 @@ config DRM_TDFX Choose this option if you have a 3dfx Banshee or Voodoo3 (or later), graphics card. If M is selected, the module will be called tdfx. +source "drivers/gpu/drm/arm/Kconfig" + config DRM_R128 tristate "ATI Rage 128" depends on DRM && PCI @@ -162,6 +172,8 @@ config DRM_AMDGPU source "drivers/gpu/drm/amd/amdgpu/Kconfig" source "drivers/gpu/drm/amd/powerplay/Kconfig" +source "drivers/gpu/drm/amd/acp/Kconfig" + source "drivers/gpu/drm/nouveau/Kconfig" config DRM_I810 diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 61766dec6a8d..6eb94fc561dc 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -22,10 +22,13 @@ drm-$(CONFIG_OF) += drm_of.o drm-$(CONFIG_AGP) += drm_agpsupport.o drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ - drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o + drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ + drm_kms_helper_common.o + drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o +drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o @@ -33,6 +36,7 @@ CFLAGS_drm_trace_points.o := -I$(src) obj-$(CONFIG_DRM) += drm.o obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o +obj-$(CONFIG_DRM_ARM) += arm/ obj-$(CONFIG_DRM_TTM) += ttm/ obj-$(CONFIG_DRM_TDFX) += tdfx/ obj-$(CONFIG_DRM_R128) += r128/ diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig new file mode 100644 index 000000000000..2b07813bceed --- /dev/null +++ b/drivers/gpu/drm/amd/acp/Kconfig @@ -0,0 +1,11 @@ +menu "ACP Configuration" + +config DRM_AMD_ACP + bool "Enable ACP IP support" + default y + select MFD_CORE + select PM_GENERIC_DOMAINS if PM + help + Choose this option to enable ACP IP support for AMD SOCs. + +endmenu diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile new file mode 100644 index 000000000000..8363cb57915b --- /dev/null +++ b/drivers/gpu/drm/amd/acp/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the ACP, which is a sub-component +# of AMDSOC/AMDGPU drm driver. +# It provides the HW control for ACP related functionalities. 
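The DRM_DP_AUX_CHARDEV option added above exposes /dev/drm_dp_auxN character devices for raw DPCD access. A hedged userspace sketch, assuming (as the help text implies) that a read at a given file offset becomes a DPCD read at that address; the node name is illustrative, and the 0x000 offset is the receiver capability field from the DisplayPort DPCD layout:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        unsigned char dpcd[16];
        int fd = open("/dev/drm_dp_aux0", O_RDONLY);  /* index depends on probe order */

        if (fd < 0)
                return 1;
        /* DPCD 0x000 onwards: receiver capability field (DPCD_REV first). */
        if (pread(fd, dpcd, sizeof(dpcd), 0x000) == sizeof(dpcd))
                printf("DPCD revision %d.%d\n", dpcd[0] >> 4, dpcd[0] & 0xf);
        close(fd);
        return 0;
}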
+ +subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include + +AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o diff --git a/drivers/gpu/drm/amd/acp/acp_hw.c b/drivers/gpu/drm/amd/acp/acp_hw.c new file mode 100644 index 000000000000..7af83f142b4b --- /dev/null +++ b/drivers/gpu/drm/amd/acp/acp_hw.c @@ -0,0 +1,50 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/errno.h> + +#include "acp_gfx_if.h" + +#define ACP_MODE_I2S 0 +#define ACP_MODE_AZ 1 + +#define mmACP_AZALIA_I2S_SELECT 0x51d4 + +int amd_acp_hw_init(void *cgs_device, + unsigned acp_version_major, unsigned acp_version_minor) +{ + unsigned int acp_mode = ACP_MODE_I2S; + + if ((acp_version_major == 2) && (acp_version_minor == 2)) + acp_mode = cgs_read_register(cgs_device, + mmACP_AZALIA_I2S_SELECT); + + if (acp_mode != ACP_MODE_I2S) + return -ENODEV; + + return 0; +} diff --git a/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h new file mode 100644 index 000000000000..bccf47b63899 --- /dev/null +++ b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h @@ -0,0 +1,34 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * +*/ + +#ifndef _ACP_GFX_IF_H +#define _ACP_GFX_IF_H + +#include <linux/types.h> +#include "cgs_linux.h" +#include "cgs_common.h" + +int amd_acp_hw_init(void *cgs_device, + unsigned acp_version_major, unsigned acp_version_minor); + +#endif /* _ACP_GFX_IF_H */ diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 20c9539abc36..c7fcdcedaadb 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -8,7 +8,8 @@ ccflags-y := -Iinclude/drm -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_PATH)/include \ -I$(FULL_AMD_PATH)/amdgpu \ -I$(FULL_AMD_PATH)/scheduler \ - -I$(FULL_AMD_PATH)/powerplay/inc + -I$(FULL_AMD_PATH)/powerplay/inc \ + -I$(FULL_AMD_PATH)/acp/include amdgpu-y := amdgpu_drv.o @@ -20,7 +21,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \ amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \ amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \ - atombios_encoders.o amdgpu_semaphore.o amdgpu_sa.o atombios_i2c.o \ + atombios_encoders.o amdgpu_sa.o atombios_i2c.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o @@ -92,7 +93,17 @@ amdgpu-y += amdgpu_cgs.o amdgpu-y += \ ../scheduler/gpu_scheduler.o \ ../scheduler/sched_fence.o \ - amdgpu_sched.o + amdgpu_job.o + +# ACP componet +ifneq ($(CONFIG_DRM_AMD_ACP),) +amdgpu-y += amdgpu_acp.o + +AMDACPPATH := ../acp +include $(FULL_AMD_PATH)/acp/Makefile + +amdgpu-y += $(AMD_ACP_FILES) +endif amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 82edf95b7740..f5bac97a438b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -53,6 +53,7 @@ #include "amdgpu_ucode.h" #include "amdgpu_gds.h" #include "amd_powerplay.h" +#include "amdgpu_acp.h" #include "gpu_scheduler.h" @@ -74,7 +75,6 @@ extern int amdgpu_dpm; extern int amdgpu_smc_load_fw; extern int amdgpu_aspm; extern int amdgpu_runtime_pm; -extern int amdgpu_hard_reset; extern unsigned amdgpu_ip_block_mask; extern int amdgpu_bapm; extern int amdgpu_deep_color; @@ -82,10 +82,8 @@ extern int amdgpu_vm_size; extern int amdgpu_vm_block_size; extern int amdgpu_vm_fault_stop; extern int amdgpu_vm_debug; -extern int amdgpu_enable_scheduler; extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; -extern int amdgpu_enable_semaphores; extern int amdgpu_powerplay; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 @@ -106,9 +104,6 @@ extern int amdgpu_powerplay; /* max number of IP instances */ #define AMDGPU_MAX_SDMA_INSTANCES 2 -/* number of hw syncs before falling back on blocking */ -#define AMDGPU_NUM_SYNCS 4 - /* hardcode that limit for now */ #define AMDGPU_VA_RESERVED_SIZE (8 << 20) @@ -189,7 +184,6 @@ struct amdgpu_fence; struct amdgpu_ib; struct amdgpu_vm; struct amdgpu_ring; -struct amdgpu_semaphore; struct amdgpu_cs_parser; struct amdgpu_job; struct amdgpu_irq_src; @@ -287,7 +281,7 @@ struct amdgpu_vm_pte_funcs { unsigned count); /* write pte one entry at a time with addr mapping */ void (*write_pte)(struct amdgpu_ib *ib, - uint64_t pe, + const dma_addr_t *pages_addr, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags); /* for linear pte/pde updates without addr mapping */ @@ -295,8 +289,6 @@ struct amdgpu_vm_pte_funcs { uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t 
flags); - /* pad the indirect buffer to the necessary number of dw */ - void (*pad_ib)(struct amdgpu_ib *ib); }; /* provided by the gmc block */ @@ -334,9 +326,6 @@ struct amdgpu_ring_funcs { struct amdgpu_ib *ib); void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq, unsigned flags); - bool (*emit_semaphore)(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait); void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr); void (*emit_hdp_flush)(struct amdgpu_ring *ring); @@ -349,6 +338,8 @@ struct amdgpu_ring_funcs { int (*test_ib)(struct amdgpu_ring *ring); /* insert NOP packets */ void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); + /* pad the indirect buffer to the necessary number of dw */ + void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); }; /* @@ -394,7 +385,7 @@ struct amdgpu_fence_driver { uint64_t gpu_addr; volatile uint32_t *cpu_addr; /* sync_seq is protected by ring emission lock */ - uint64_t sync_seq[AMDGPU_MAX_RINGS]; + uint64_t sync_seq; atomic64_t last_seq; bool initialized; struct amdgpu_irq_src *irq_src; @@ -447,11 +438,6 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring); int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); -bool amdgpu_fence_need_sync(struct amdgpu_fence *fence, - struct amdgpu_ring *ring); -void amdgpu_fence_note_sync(struct amdgpu_fence *fence, - struct amdgpu_ring *ring); - /* * TTM. */ @@ -470,6 +456,8 @@ struct amdgpu_mman { /* buffer handling */ const struct amdgpu_buffer_funcs *buffer_funcs; struct amdgpu_ring *buffer_funcs_ring; + /* Scheduler entity for buffer moves */ + struct amd_sched_entity entity; }; int amdgpu_copy_buffer(struct amdgpu_ring *ring, @@ -484,8 +472,6 @@ struct amdgpu_bo_list_entry { struct amdgpu_bo *robj; struct ttm_validate_buffer tv; struct amdgpu_bo_va *bo_va; - unsigned prefered_domains; - unsigned allowed_domains; uint32_t priority; }; @@ -522,7 +508,8 @@ struct amdgpu_bo { /* Protected by gem.mutex */ struct list_head list; /* Protected by tbo.reserved */ - u32 initial_domain; + u32 prefered_domains; + u32 allowed_domains; struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; struct ttm_placement placement; struct ttm_buffer_object tbo; @@ -544,7 +531,6 @@ struct amdgpu_bo { struct amdgpu_bo *parent; struct ttm_bo_kmap_obj dma_buf_vmap; - pid_t pid; struct amdgpu_mn *mn; struct list_head mn_list; }; @@ -621,13 +607,7 @@ struct amdgpu_sa_bo { /* * GEM objects. */ -struct amdgpu_gem { - struct mutex mutex; - struct list_head objects; -}; - -int amdgpu_gem_init(struct amdgpu_device *adev); -void amdgpu_gem_fini(struct amdgpu_device *adev); +void amdgpu_gem_force_release(struct amdgpu_device *adev); int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, bool kernel, @@ -639,32 +619,10 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, int amdgpu_mode_dumb_mmap(struct drm_file *filp, struct drm_device *dev, uint32_t handle, uint64_t *offset_p); - -/* - * Semaphores. 
- */ -struct amdgpu_semaphore { - struct amdgpu_sa_bo *sa_bo; - signed waiters; - uint64_t gpu_addr; -}; - -int amdgpu_semaphore_create(struct amdgpu_device *adev, - struct amdgpu_semaphore **semaphore); -bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore); -bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore); -void amdgpu_semaphore_free(struct amdgpu_device *adev, - struct amdgpu_semaphore **semaphore, - struct fence *fence); - /* * Synchronization */ struct amdgpu_sync { - struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS]; - struct fence *sync_to[AMDGPU_MAX_RINGS]; DECLARE_HASHTABLE(fences, 4); struct fence *last_vm_update; }; @@ -676,12 +634,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct reservation_object *resv, void *owner); -int amdgpu_sync_rings(struct amdgpu_sync *sync, - struct amdgpu_ring *ring); struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); int amdgpu_sync_wait(struct amdgpu_sync *sync); -void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct fence *fence); +void amdgpu_sync_free(struct amdgpu_sync *sync); /* * GART structures, functions & helpers @@ -799,6 +754,7 @@ struct amdgpu_flip_work { struct fence *excl; unsigned shared_count; struct fence **shared; + struct fence_cb cb; }; @@ -811,12 +767,11 @@ struct amdgpu_ib { uint32_t length_dw; uint64_t gpu_addr; uint32_t *ptr; - struct amdgpu_ring *ring; struct amdgpu_fence *fence; struct amdgpu_user_fence *user; + bool grabbed_vmid; struct amdgpu_vm *vm; struct amdgpu_ctx *ctx; - struct amdgpu_sync sync; uint32_t gds_base, gds_size; uint32_t gws_base, gws_size; uint32_t oa_base, oa_size; @@ -835,13 +790,14 @@ enum amdgpu_ring_type { extern struct amd_sched_backend_ops amdgpu_sched_ops; -int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_ib *ibs, - unsigned num_ibs, - int (*free_job)(struct amdgpu_job *), - void *owner, - struct fence **fence); +int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, + struct amdgpu_job **job); +int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, + struct amdgpu_job **job); +void amdgpu_job_free(struct amdgpu_job *job); +int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, + struct amd_sched_entity *entity, void *owner, + struct fence **f); struct amdgpu_ring { struct amdgpu_device *adev; @@ -850,7 +806,6 @@ struct amdgpu_ring { struct amd_gpu_scheduler sched; spinlock_t fence_lock; - struct mutex *ring_lock; struct amdgpu_bo *ring_obj; volatile uint32_t *ring; unsigned rptr_offs; @@ -859,7 +814,7 @@ struct amdgpu_ring { unsigned wptr; unsigned wptr_old; unsigned ring_size; - unsigned ring_free_dw; + unsigned max_dw; int count_dw; uint64_t gpu_addr; uint32_t align_mask; @@ -867,8 +822,6 @@ struct amdgpu_ring { bool ready; u32 nop; u32 idx; - u64 last_semaphore_signal_addr; - u64 last_semaphore_wait_addr; u32 me; u32 pipe; u32 queue; @@ -881,7 +834,6 @@ struct amdgpu_ring { struct amdgpu_ctx *current_ctx; enum amdgpu_ring_type type; char name[16]; - bool is_pte_ring; }; /* @@ -932,6 +884,8 @@ struct amdgpu_vm_id { }; struct amdgpu_vm { + /* tree of virtual addresses mapped */ + spinlock_t it_lock; struct rb_root va; /* protecting invalidated */ @@ -956,30 +910,40 @@ struct amdgpu_vm { /* for id and flush management per ring */ struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; - /* for interval tree */ - spinlock_t 
it_lock; + /* protecting freed */ spinlock_t freed_lock; + + /* Scheduler entity for page table updates */ + struct amd_sched_entity entity; +}; + +struct amdgpu_vm_manager_id { + struct list_head list; + struct fence *active; + atomic_long_t owner; }; struct amdgpu_vm_manager { - struct { - struct fence *active; - atomic_long_t owner; - } ids[AMDGPU_NUM_VM]; + /* Handling of VMIDs */ + struct mutex lock; + unsigned num_ids; + struct list_head ids_lru; + struct amdgpu_vm_manager_id ids[AMDGPU_NUM_VM]; uint32_t max_pfn; - /* number of VMIDs */ - unsigned nvm; /* vram base address for page table entry */ u64 vram_base_offset; /* is vm enabled? */ bool enabled; /* vm pte handling */ const struct amdgpu_vm_pte_funcs *vm_pte_funcs; - struct amdgpu_ring *vm_pte_funcs_ring; + struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; + unsigned vm_pte_num_rings; + atomic_t vm_pte_next_ring; }; +void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_fini(struct amdgpu_device *adev); int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); @@ -990,14 +954,11 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates); void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync); + struct amdgpu_sync *sync, struct fence *fence); void amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_vm *vm, struct fence *updates); -void amdgpu_vm_fence(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct fence *fence); -uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); +uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_clear_freed(struct amdgpu_device *adev, @@ -1023,7 +984,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, uint64_t addr); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); -int amdgpu_vm_free_job(struct amdgpu_job *job); /* * context related structures @@ -1051,10 +1011,6 @@ struct amdgpu_ctx_mgr { struct idr ctx_handles; }; -int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri, - struct amdgpu_ctx *ctx); -void amdgpu_ctx_fini(struct amdgpu_ctx *ctx); - struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); int amdgpu_ctx_put(struct amdgpu_ctx *ctx); @@ -1096,6 +1052,8 @@ struct amdgpu_bo_list { struct amdgpu_bo_list * amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id); +void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, + struct list_head *validated); void amdgpu_bo_list_put(struct amdgpu_bo_list *list); void amdgpu_bo_list_free(struct amdgpu_bo_list *list); @@ -1169,6 +1127,7 @@ struct amdgpu_gca_config { unsigned multi_gpu_tile_size; unsigned mc_arb_ramcfg; unsigned gb_addr_config; + unsigned num_rbs; uint32_t tile_mode_array[32]; uint32_t macrotile_mode_array[16]; @@ -1211,23 +1170,21 @@ struct amdgpu_gfx { unsigned ce_ram_size; }; -int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, +int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned size, struct amdgpu_ib *ib); void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib); -int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, - struct amdgpu_ib *ib, void *owner); +int amdgpu_ib_schedule(struct 
amdgpu_ring *ring, unsigned num_ibs, + struct amdgpu_ib *ib, void *owner, + struct fence *last_vm_update, + struct fence **f); int amdgpu_ib_pool_init(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev); -/* Ring access between begin & end cannot sleep */ -void amdgpu_ring_free_size(struct amdgpu_ring *ring); int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); -int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw); void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); +void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); -void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); -void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring); unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, uint32_t **data); int amdgpu_ring_restore(struct amdgpu_ring *ring, @@ -1246,47 +1203,57 @@ struct amdgpu_cs_chunk { uint32_t chunk_id; uint32_t length_dw; uint32_t *kdata; - void __user *user_ptr; }; struct amdgpu_cs_parser { struct amdgpu_device *adev; struct drm_file *filp; struct amdgpu_ctx *ctx; - struct amdgpu_bo_list *bo_list; + /* chunks */ unsigned nchunks; struct amdgpu_cs_chunk *chunks; - /* relocations */ - struct amdgpu_bo_list_entry vm_pd; - struct list_head validated; - struct fence *fence; - struct amdgpu_ib *ibs; - uint32_t num_ibs; + /* scheduler job object */ + struct amdgpu_job *job; - struct ww_acquire_ctx ticket; + /* buffer objects */ + struct ww_acquire_ctx ticket; + struct amdgpu_bo_list *bo_list; + struct amdgpu_bo_list_entry vm_pd; + struct list_head validated; + struct fence *fence; + uint64_t bytes_moved_threshold; + uint64_t bytes_moved; /* user fence */ - struct amdgpu_user_fence uf; struct amdgpu_bo_list_entry uf_entry; }; struct amdgpu_job { struct amd_sched_job base; struct amdgpu_device *adev; + struct amdgpu_ring *ring; + struct amdgpu_sync sync; struct amdgpu_ib *ibs; uint32_t num_ibs; void *owner; struct amdgpu_user_fence uf; - int (*free_job)(struct amdgpu_job *job); }; #define to_amdgpu_job(sched_job) \ container_of((sched_job), struct amdgpu_job, base) -static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx) +static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, + uint32_t ib_idx, int idx) { - return p->ibs[ib_idx].ptr[idx]; + return p->job->ibs[ib_idx].ptr[idx]; +} + +static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p, + uint32_t ib_idx, int idx, + uint32_t value) +{ + p->job->ibs[ib_idx].ptr[idx] = value; } /* @@ -1538,6 +1505,7 @@ enum amdgpu_dpm_forced_level { AMDGPU_DPM_FORCED_LEVEL_AUTO = 0, AMDGPU_DPM_FORCED_LEVEL_LOW = 1, AMDGPU_DPM_FORCED_LEVEL_HIGH = 2, + AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3, }; struct amdgpu_vce_state { @@ -1667,6 +1635,7 @@ struct amdgpu_uvd { struct amdgpu_ring ring; struct amdgpu_irq_src irq; bool address_64_bit; + struct amd_sched_entity entity; }; /* @@ -1691,6 +1660,7 @@ struct amdgpu_vce { struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; struct amdgpu_irq_src irq; unsigned harvest_config; + struct amd_sched_entity entity; }; /* @@ -1925,6 +1895,18 @@ void amdgpu_cgs_destroy_device(void *cgs_device); /* + * CGS + */ +void *amdgpu_cgs_create_device(struct amdgpu_device *adev); +void amdgpu_cgs_destroy_device(void *cgs_device); + + +/* GPU virtualization */ +struct amdgpu_virtualization { + bool supports_sr_iov; +}; + +/* * Core 
structure, functions and helpers. */ typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t); @@ -1944,6 +1926,10 @@ struct amdgpu_device { struct drm_device *ddev; struct pci_dev *pdev; +#ifdef CONFIG_DRM_AMD_ACP + struct amdgpu_acp acp; +#endif + /* ASIC */ enum amd_asic_type asic_type; uint32_t family; @@ -2020,7 +2006,6 @@ struct amdgpu_device { /* memory management */ struct amdgpu_mman mman; - struct amdgpu_gem gem; struct amdgpu_vram_scratch vram_scratch; struct amdgpu_wb wb; atomic64_t vram_usage; @@ -2038,7 +2023,6 @@ struct amdgpu_device { /* rings */ unsigned fence_context; - struct mutex ring_lock; unsigned num_rings; struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; bool ib_pool_ready; @@ -2050,6 +2034,7 @@ struct amdgpu_device { /* powerplay */ struct amd_powerplay powerplay; bool pp_enabled; + bool pp_force_state_enabled; /* dpm */ struct amdgpu_pm pm; @@ -2091,8 +2076,7 @@ struct amdgpu_device { /* amdkfd interface */ struct kfd_dev *kfd; - /* kernel conext for IB submission */ - struct amdgpu_ctx kernel_ctx; + struct amdgpu_virtualization virtualization; }; bool amdgpu_device_is_px(struct drm_device *dev); @@ -2197,7 +2181,6 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) ring->ring[ring->wptr++] = v; ring->wptr &= ring->ptr_mask; ring->count_dw--; - ring->ring_free_dw--; } static inline struct amdgpu_sdma_instance * @@ -2233,9 +2216,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) -#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags))) +#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags))) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) -#define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib))) #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) @@ -2245,9 +2227,9 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib)) #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) -#define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait)) #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) +#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) 
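With pad_ib moved from amdgpu_vm_pte_funcs into amdgpu_ring_funcs, padding becomes a property of the ring: the amdgpu_ring_pad_ib() wrapper defined just above dispatches to ring->funcs->pad_ib(), and call sites that used amdgpu_vm_pad_ib(adev, ib) switch to amdgpu_ring_pad_ib(ring, ib). A rough sketch of what the newly declared generic helper is expected to do, based only on the ring and IB fields visible in this header (an assumption, not the actual drm/amd implementation):

/* Pad the IB with the ring's NOP packet until length_dw satisfies the
 * ring's alignment requirement (align_mask is of the form 2^n - 1). */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
        while (ib->length_dw & ring->align_mask)
                ib->ptr[ib->length_dw++] = ring->nop;
}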
@@ -2339,6 +2321,21 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_dpm_get_performance_level(adev) \ (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) +#define amdgpu_dpm_get_pp_num_states(adev, data) \ + (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data) + +#define amdgpu_dpm_get_pp_table(adev, table) \ + (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table) + +#define amdgpu_dpm_set_pp_table(adev, buf, size) \ + (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size) + +#define amdgpu_dpm_print_clock_levels(adev, type, buf) \ + (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf) + +#define amdgpu_dpm_force_clock_level(adev, type, level) \ + (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) + #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) @@ -2349,7 +2346,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev); void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_card_posted(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); -bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, @@ -2359,7 +2355,9 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, uint32_t flags); -bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); +struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm); +bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, + unsigned long end); bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c new file mode 100644 index 000000000000..9f8cfaab3004 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -0,0 +1,502 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include <linux/irqdomain.h> +#include <linux/pm_domain.h> +#include <linux/platform_device.h> +#include <sound/designware_i2s.h> +#include <sound/pcm.h> + +#include "amdgpu.h" +#include "atom.h" +#include "amdgpu_acp.h" + +#include "acp_gfx_if.h" + +#define ACP_TILE_ON_MASK 0x03 +#define ACP_TILE_OFF_MASK 0x02 +#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f +#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20 + +#define ACP_TILE_P1_MASK 0x3e +#define ACP_TILE_P2_MASK 0x3d +#define ACP_TILE_DSP0_MASK 0x3b +#define ACP_TILE_DSP1_MASK 0x37 + +#define ACP_TILE_DSP2_MASK 0x2f + +#define ACP_DMA_REGS_END 0x146c0 +#define ACP_I2S_PLAY_REGS_START 0x14840 +#define ACP_I2S_PLAY_REGS_END 0x148b4 +#define ACP_I2S_CAP_REGS_START 0x148b8 +#define ACP_I2S_CAP_REGS_END 0x1496c + +#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac +#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8 +#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c +#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68 + +#define mmACP_PGFSM_RETAIN_REG 0x51c9 +#define mmACP_PGFSM_CONFIG_REG 0x51ca +#define mmACP_PGFSM_READ_REG_0 0x51cc + +#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8 +#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9 +#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa +#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb + +#define ACP_TIMEOUT_LOOP 0x000000FF +#define ACP_DEVS 3 +#define ACP_SRC_ID 162 + +enum { + ACP_TILE_P1 = 0, + ACP_TILE_P2, + ACP_TILE_DSP0, + ACP_TILE_DSP1, + ACP_TILE_DSP2, +}; + +static int acp_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->acp.parent = adev->dev; + + adev->acp.cgs_device = + amdgpu_cgs_create_device(adev); + if (!adev->acp.cgs_device) + return -EINVAL; + + return 0; +} + +static int acp_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->acp.cgs_device) + amdgpu_cgs_destroy_device(adev->acp.cgs_device); + + return 0; +} + +/* power off a tile/block within ACP */ +static int acp_suspend_tile(void *cgs_dev, int tile) +{ + u32 val = 0; + u32 count = 0; + + if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) { + pr_err("Invalid ACP tile : %d to suspend\n", tile); + return -1; + } + + val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile); + val &= ACP_TILE_ON_MASK; + + if (val == 0x0) { + val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG); + val = val | (1 << tile); + cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val); + cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG, + 0x500 + tile); + + count = ACP_TIMEOUT_LOOP; + while (true) { + val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + + tile); + val = val & ACP_TILE_ON_MASK; + if (val == ACP_TILE_OFF_MASK) + break; + if (--count == 0) { + pr_err("Timeout reading ACP PGFSM status\n"); + return -ETIMEDOUT; + } + udelay(100); + } + + val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG); + + val |= ACP_TILE_OFF_RETAIN_REG_MASK; + cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val); + } + return 0; +} + +/* power on a tile/block within ACP */ +static int acp_resume_tile(void *cgs_dev, int tile) +{ + u32 val = 0; + u32 count = 0; + + if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) { + pr_err("Invalid ACP tile to resume\n"); + return -1; + } + + val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile); + val = val & ACP_TILE_ON_MASK; + + if (val != 0x0) { + cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG, + 0x600 + tile); + count = ACP_TIMEOUT_LOOP; + while (true) { + val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + + tile); + val = 
val & ACP_TILE_ON_MASK; + if (val == 0x0) + break; + if (--count == 0) { + pr_err("Timeout reading ACP PGFSM status\n"); + return -ETIMEDOUT; + } + udelay(100); + } + val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG); + if (tile == ACP_TILE_P1) + val = val & (ACP_TILE_P1_MASK); + else if (tile == ACP_TILE_P2) + val = val & (ACP_TILE_P2_MASK); + + cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val); + } + return 0; +} + +struct acp_pm_domain { + void *cgs_dev; + struct generic_pm_domain gpd; +}; + +static int acp_poweroff(struct generic_pm_domain *genpd) +{ + int i, ret; + struct acp_pm_domain *apd; + + apd = container_of(genpd, struct acp_pm_domain, gpd); + if (apd != NULL) { + /* Donot return abruptly if any of power tile fails to suspend. + * Log it and continue powering off other tile + */ + for (i = 4; i >= 0 ; i--) { + ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i); + if (ret) + pr_err("ACP tile %d tile suspend failed\n", i); + } + } + return 0; +} + +static int acp_poweron(struct generic_pm_domain *genpd) +{ + int i, ret; + struct acp_pm_domain *apd; + + apd = container_of(genpd, struct acp_pm_domain, gpd); + if (apd != NULL) { + for (i = 0; i < 2; i++) { + ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i); + if (ret) { + pr_err("ACP tile %d resume failed\n", i); + break; + } + } + + /* Disable DSPs which are not going to be used */ + for (i = 0; i < 3; i++) { + ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i); + /* Continue suspending other DSP, even if one fails */ + if (ret) + pr_err("ACP DSP %d suspend failed\n", i); + } + } + return 0; +} + +static struct device *get_mfd_cell_dev(const char *device_name, int r) +{ + char auto_dev_name[25]; + char buf[8]; + struct device *dev; + + sprintf(buf, ".%d.auto", r); + strcpy(auto_dev_name, device_name); + strcat(auto_dev_name, buf); + dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name); + dev_info(dev, "device %s added to pm domain\n", auto_dev_name); + + return dev; +} + +/** + * acp_hw_init - start and test ACP block + * + * @adev: amdgpu_device pointer + * + */ +static int acp_hw_init(void *handle) +{ + int r, i; + uint64_t acp_base; + struct device *dev; + struct i2s_platform_data *i2s_pdata; + + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + const struct amdgpu_ip_block_version *ip_version = + amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP); + + if (!ip_version) + return -EINVAL; + + r = amd_acp_hw_init(adev->acp.cgs_device, + ip_version->major, ip_version->minor); + /* -ENODEV means board uses AZ rather than ACP */ + if (r == -ENODEV) + return 0; + else if (r) + return r; + + r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO, + 0x5289, 0, &acp_base); + if (r == -ENODEV) + return 0; + else if (r) + return r; + + adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); + if (adev->acp.acp_genpd == NULL) + return -ENOMEM; + + adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; + adev->acp.acp_genpd->gpd.power_off = acp_poweroff; + adev->acp.acp_genpd->gpd.power_on = acp_poweron; + + + adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; + + pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); + + adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS, + GFP_KERNEL); + + if (adev->acp.acp_cell == NULL) + return -ENOMEM; + + adev->acp.acp_res = kzalloc(sizeof(struct resource) * 4, GFP_KERNEL); + + if (adev->acp.acp_res == NULL) { + kfree(adev->acp.acp_cell); + return -ENOMEM; + } + + i2s_pdata = kzalloc(sizeof(struct 
i2s_platform_data) * 2, GFP_KERNEL); + if (i2s_pdata == NULL) { + kfree(adev->acp.acp_res); + kfree(adev->acp.acp_cell); + return -ENOMEM; + } + + i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; + i2s_pdata[0].cap = DWC_I2S_PLAY; + i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000; + i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET; + i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET; + + i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | + DW_I2S_QUIRK_COMP_PARAM1; + i2s_pdata[1].cap = DWC_I2S_RECORD; + i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000; + i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; + i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET; + + adev->acp.acp_res[0].name = "acp2x_dma"; + adev->acp.acp_res[0].flags = IORESOURCE_MEM; + adev->acp.acp_res[0].start = acp_base; + adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END; + + adev->acp.acp_res[1].name = "acp2x_dw_i2s_play"; + adev->acp.acp_res[1].flags = IORESOURCE_MEM; + adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START; + adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END; + + adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap"; + adev->acp.acp_res[2].flags = IORESOURCE_MEM; + adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START; + adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END; + + adev->acp.acp_res[3].name = "acp2x_dma_irq"; + adev->acp.acp_res[3].flags = IORESOURCE_IRQ; + adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162); + adev->acp.acp_res[3].end = adev->acp.acp_res[3].start; + + adev->acp.acp_cell[0].name = "acp_audio_dma"; + adev->acp.acp_cell[0].num_resources = 4; + adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; + + adev->acp.acp_cell[1].name = "designware-i2s"; + adev->acp.acp_cell[1].num_resources = 1; + adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1]; + adev->acp.acp_cell[1].platform_data = &i2s_pdata[0]; + adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data); + + adev->acp.acp_cell[2].name = "designware-i2s"; + adev->acp.acp_cell[2].num_resources = 1; + adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2]; + adev->acp.acp_cell[2].platform_data = &i2s_pdata[1]; + adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data); + + r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, + ACP_DEVS); + if (r) + return r; + + for (i = 0; i < ACP_DEVS ; i++) { + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); + r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); + if (r) { + dev_err(dev, "Failed to add dev to genpd\n"); + return r; + } + } + + return 0; +} + +/** + * acp_hw_fini - stop the hardware block + * + * @adev: amdgpu_device pointer + * + */ +static int acp_hw_fini(void *handle) +{ + int i, ret; + struct device *dev; + + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < ACP_DEVS ; i++) { + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); + ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); + /* If removal fails, dont giveup and try rest */ + if (ret) + dev_err(dev, "remove dev from genpd failed\n"); + } + + mfd_remove_devices(adev->acp.parent); + kfree(adev->acp.acp_res); + kfree(adev->acp.acp_genpd); + kfree(adev->acp.acp_cell); + + return 0; +} + +static int acp_suspend(void *handle) +{ + return 0; +} + +static int acp_resume(void *handle) +{ + int i, ret; + struct acp_pm_domain *apd; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* SMU block will power on ACP 
irrespective of ACP runtime status. + * Power off explicitly based on genpd ACP runtime status so that ACP + * hw and ACP-genpd status are in sync. + * 'suspend_power_off' represents "Power status before system suspend" + */ + if (adev->acp.acp_genpd->gpd.suspend_power_off == true) { + apd = container_of(&adev->acp.acp_genpd->gpd, + struct acp_pm_domain, gpd); + + for (i = 4; i >= 0 ; i--) { + ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i); + if (ret) + pr_err("ACP tile %d tile suspend failed\n", i); + } + } + return 0; +} + +static int acp_early_init(void *handle) +{ + return 0; +} + +static bool acp_is_idle(void *handle) +{ + return true; +} + +static int acp_wait_for_idle(void *handle) +{ + return 0; +} + +static int acp_soft_reset(void *handle) +{ + return 0; +} + +static void acp_print_status(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + dev_info(adev->dev, "ACP STATUS\n"); +} + +static int acp_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int acp_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs acp_ip_funcs = { + .early_init = acp_early_init, + .late_init = NULL, + .sw_init = acp_sw_init, + .sw_fini = acp_sw_fini, + .hw_init = acp_hw_init, + .hw_fini = acp_hw_fini, + .suspend = acp_suspend, + .resume = acp_resume, + .is_idle = acp_is_idle, + .wait_for_idle = acp_wait_for_idle, + .soft_reset = acp_soft_reset, + .print_status = acp_print_status, + .set_clockgating_state = acp_set_clockgating_state, + .set_powergating_state = acp_set_powergating_state, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h new file mode 100644 index 000000000000..f6e32a639107 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h @@ -0,0 +1,42 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_ACP_H__ +#define __AMDGPU_ACP_H__ + +#include <linux/mfd/core.h> + +struct amdgpu_acp { + struct device *parent; + void *cgs_device; + struct amd_acp_private *private; + struct mfd_cell *acp_cell; + struct resource *acp_res; + struct acp_pm_domain *acp_genpd; +}; + +extern const struct amd_ip_funcs acp_ip_funcs; + +#endif /* __AMDGPU_ACP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 84d68d658f8a..32809f749903 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -30,25 +30,38 @@ const struct kfd2kgd_calls *kfd2kgd; const struct kgd2kfd_calls *kgd2kfd; bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); -bool amdgpu_amdkfd_init(void) +int amdgpu_amdkfd_init(void) { + int ret; + #if defined(CONFIG_HSA_AMD_MODULE) - bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); + int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); kgd2kfd_init_p = symbol_request(kgd2kfd_init); if (kgd2kfd_init_p == NULL) - return false; + return -ENOENT; + + ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd); + if (ret) { + symbol_put(kgd2kfd_init); + kgd2kfd = NULL; + } + +#elif defined(CONFIG_HSA_AMD) + ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd); + if (ret) + kgd2kfd = NULL; + +#else + ret = -ENOENT; #endif - return true; + + return ret; } bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev) { -#if defined(CONFIG_HSA_AMD_MODULE) - bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); -#endif - switch (rdev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_KAVERI: @@ -62,35 +75,7 @@ bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev) return false; } -#if defined(CONFIG_HSA_AMD_MODULE) - kgd2kfd_init_p = symbol_request(kgd2kfd_init); - - if (kgd2kfd_init_p == NULL) { - kfd2kgd = NULL; - return false; - } - - if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) { - symbol_put(kgd2kfd_init); - kfd2kgd = NULL; - kgd2kfd = NULL; - - return false; - } - return true; -#elif defined(CONFIG_HSA_AMD) - if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) { - kfd2kgd = NULL; - kgd2kfd = NULL; - return false; - } - - return true; -#else - kfd2kgd = NULL; - return false; -#endif } void amdgpu_amdkfd_fini(void) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index a8be765542e6..de530f68d4e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -36,7 +36,7 @@ struct kgd_mem { void *cpu_ptr; }; -bool amdgpu_amdkfd_init(void); +int amdgpu_amdkfd_init(void); void amdgpu_amdkfd_fini(void); bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 9416e0f5c1db..84b0ce39ee14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1514,6 +1514,19 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev, return -EINVAL; } +bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev) +{ + int index = GetIndexIntoMasterTable(DATA, GPUVirtualizationInfo); + u8 frev, crev; + u16 data_offset, size; + + if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size, + &frev, &crev, &data_offset)) + return true; + + return false; +} + void amdgpu_atombios_scratch_regs_lock(struct 
amdgpu_device *adev, bool lock) { uint32_t bios_6_scratch; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 0ebb959ea435..9e1442053fe4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -196,6 +196,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev, u8 module_index, struct atom_mc_reg_table *reg_table); +bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev); + void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock); void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 3c895863fcf5..fa948dcbdd5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -552,13 +552,14 @@ static bool amdgpu_atpx_detect(void) void amdgpu_register_atpx_handler(void) { bool r; + enum vga_switcheroo_handler_flags_t handler_flags = 0; /* detect if we have any ATPX + 2 VGA in the system */ r = amdgpu_atpx_detect(); if (!r) return; - vga_switcheroo_register_handler(&amdgpu_atpx_handler); + vga_switcheroo_register_handler(&amdgpu_atpx_handler, handler_flags); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index f82a2dd83874..90d6fc1618aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -32,6 +32,9 @@ #include "amdgpu.h" #include "amdgpu_trace.h" +#define AMDGPU_BO_LIST_MAX_PRIORITY 32u +#define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1) + static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv, struct amdgpu_bo_list **result, int *id) @@ -90,6 +93,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, bool has_userptr = false; unsigned i; + int r; array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry)); if (!array) @@ -99,31 +103,34 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, for (i = 0; i < num_entries; ++i) { struct amdgpu_bo_list_entry *entry = &array[i]; struct drm_gem_object *gobj; + struct mm_struct *usermm; gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle); - if (!gobj) + if (!gobj) { + r = -ENOENT; goto error_free; + } entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); drm_gem_object_unreference_unlocked(gobj); - entry->priority = info[i].bo_priority; - entry->prefered_domains = entry->robj->initial_domain; - entry->allowed_domains = entry->prefered_domains; - if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) - entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; - if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) { + entry->priority = min(info[i].bo_priority, + AMDGPU_BO_LIST_MAX_PRIORITY); + usermm = amdgpu_ttm_tt_get_usermm(entry->robj->tbo.ttm); + if (usermm) { + if (usermm != current->mm) { + r = -EPERM; + goto error_free; + } has_userptr = true; - entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT; - entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; } entry->tv.bo = &entry->robj->tbo; entry->tv.shared = true; - if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) + if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) gds_obj = entry->robj; - if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS) + if (entry->robj->prefered_domains == 
AMDGPU_GEM_DOMAIN_GWS) gws_obj = entry->robj; - if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA) + if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA) oa_obj = entry->robj; trace_amdgpu_bo_list_set(list, entry->robj); @@ -145,7 +152,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, error_free: drm_free_large(array); - return -ENOENT; + return r; } struct amdgpu_bo_list * @@ -161,6 +168,36 @@ amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id) return result; } +void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, + struct list_head *validated) +{ + /* This is based on the bucket sort with O(n) time complexity. + * An item with priority "i" is added to bucket[i]. The lists are then + * concatenated in descending order. + */ + struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS]; + unsigned i; + + for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++) + INIT_LIST_HEAD(&bucket[i]); + + /* Since buffers which appear sooner in the relocation list are + * likely to be used more often than buffers which appear later + * in the list, the sort mustn't change the ordering of buffers + * with the same priority, i.e. it must be stable. + */ + for (i = 0; i < list->num_entries; i++) { + unsigned priority = list->array[i].priority; + + list_add_tail(&list->array[i].tv.head, + &bucket[priority]); + } + + /* Connect the sorted buckets in the output list. */ + for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++) + list_splice(&bucket[i], validated); +} + void amdgpu_bo_list_put(struct amdgpu_bo_list *list) { mutex_unlock(&list->lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b882e8175615..52c3eb96b199 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -30,47 +30,6 @@ #include "amdgpu.h" #include "amdgpu_trace.h" -#define AMDGPU_CS_MAX_PRIORITY 32u -#define AMDGPU_CS_NUM_BUCKETS (AMDGPU_CS_MAX_PRIORITY + 1) - -/* This is based on the bucket sort with O(n) time complexity. - * An item with priority "i" is added to bucket[i]. The lists are then - * concatenated in descending order. - */ -struct amdgpu_cs_buckets { - struct list_head bucket[AMDGPU_CS_NUM_BUCKETS]; -}; - -static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b) -{ - unsigned i; - - for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) - INIT_LIST_HEAD(&b->bucket[i]); -} - -static void amdgpu_cs_buckets_add(struct amdgpu_cs_buckets *b, - struct list_head *item, unsigned priority) -{ - /* Since buffers which appear sooner in the relocation list are - * likely to be used more often than buffers which appear later - * in the list, the sort mustn't change the ordering of buffers - * with the same priority, i.e. it must be stable. - */ - list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]); -} - -static void amdgpu_cs_buckets_get_list(struct amdgpu_cs_buckets *b, - struct list_head *out_list) -{ - unsigned i; - - /* Connect the sorted buckets in the output list. 
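amdgpu_bo_list_get_list() above takes over the bucket sort being removed from amdgpu_cs.c: each entry goes into bucket[priority], and splicing bucket 0, 1, ... onto the head of the output list leaves the highest-numbered bucket first while keeping entries of equal priority in submission order. A standalone toy version using plain arrays instead of the kernel list API, emitting buckets from the highest index down to mirror that result:

#include <stdio.h>

#define MAX_PRIORITY 32
#define NUM_BUCKETS  (MAX_PRIORITY + 1)
#define MAX_ITEMS    8

int main(void)
{
        /* (handle, priority) pairs in submission order */
        unsigned handle[]   = { 1, 2, 3, 4, 5 };
        unsigned priority[] = { 0, 2, 2, 1, 0 };
        unsigned bucket[NUM_BUCKETS][MAX_ITEMS], count[NUM_BUCKETS] = { 0 };
        unsigned i, n = 5;
        int p;

        for (i = 0; i < n; i++)          /* stable: preserves submission order */
                bucket[priority[i]][count[priority[i]]++] = handle[i];

        for (p = MAX_PRIORITY; p >= 0; p--)      /* highest priority bucket first */
                for (i = 0; i < count[p]; i++)
                        printf("handle %u (prio %d)\n", bucket[p][i], p);

        return 0;                        /* prints handles 2, 3, 4, 1, 5 */
}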
*/ - for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) { - list_splice(&b->bucket[i], out_list); - } -} - int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, u32 ip_instance, u32 ring, struct amdgpu_ring **out_ring) @@ -128,6 +87,7 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, } static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, + struct amdgpu_user_fence *uf, struct drm_amdgpu_cs_chunk_fence *fence_data) { struct drm_gem_object *gobj; @@ -139,17 +99,15 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, if (gobj == NULL) return -EINVAL; - p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); - p->uf.offset = fence_data->offset; + uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); + uf->offset = fence_data->offset; - if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) { + if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) { drm_gem_object_unreference_unlocked(gobj); return -EINVAL; } - p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo); - p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT; - p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT; + p->uf_entry.robj = amdgpu_bo_ref(uf->bo); p->uf_entry.priority = 0; p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; p->uf_entry.tv.shared = true; @@ -160,11 +118,12 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { + struct amdgpu_fpriv *fpriv = p->filp->driver_priv; union drm_amdgpu_cs *cs = data; uint64_t *chunk_array_user; uint64_t *chunk_array; - struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - unsigned size; + struct amdgpu_user_fence uf = {}; + unsigned size, num_ibs = 0; int i; int ret; @@ -181,15 +140,12 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) goto free_chunk; } - p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); - /* get chunks */ - INIT_LIST_HEAD(&p->validated); chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks); if (copy_from_user(chunk_array, chunk_array_user, sizeof(uint64_t)*cs->in.num_chunks)) { ret = -EFAULT; - goto put_bo_list; + goto put_ctx; } p->nchunks = cs->in.num_chunks; @@ -197,7 +153,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) GFP_KERNEL); if (!p->chunks) { ret = -ENOMEM; - goto put_bo_list; + goto put_ctx; } for (i = 0; i < p->nchunks; i++) { @@ -217,7 +173,6 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) size = p->chunks[i].length_dw; cdata = (void __user *)(unsigned long)user_chunk.chunk_data; - p->chunks[i].user_ptr = cdata; p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); if (p->chunks[i].kdata == NULL) { @@ -233,7 +188,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) switch (p->chunks[i].chunk_id) { case AMDGPU_CHUNK_ID_IB: - p->num_ibs++; + ++num_ibs; break; case AMDGPU_CHUNK_ID_FENCE: @@ -243,7 +198,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) goto free_partial_kdata; } - ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata); + ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata); if (ret) goto free_partial_kdata; @@ -258,12 +213,11 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } } - - p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!p->ibs) { - ret = -ENOMEM; + ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job); + if (ret) goto free_all_kdata; - } + + p->job->uf = uf; kfree(chunk_array); return 0; @@ -274,9 +228,7 @@ free_partial_kdata: for (; i >= 0; 
i--) drm_free_large(p->chunks[i].kdata); kfree(p->chunks); -put_bo_list: - if (p->bo_list) - amdgpu_bo_list_put(p->bo_list); +put_ctx: amdgpu_ctx_put(p->ctx); free_chunk: kfree(chunk_array); @@ -336,80 +288,76 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) return max(bytes_moved_threshold, 1024*1024ull); } -int amdgpu_cs_list_validate(struct amdgpu_device *adev, - struct amdgpu_vm *vm, +int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, struct list_head *validated) { struct amdgpu_bo_list_entry *lobj; - struct amdgpu_bo *bo; - u64 bytes_moved = 0, initial_bytes_moved; - u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev); + u64 initial_bytes_moved; int r; list_for_each_entry(lobj, validated, tv.head) { - bo = lobj->robj; - if (!bo->pin_count) { - u32 domain = lobj->prefered_domains; - u32 current_domain = - amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); - - /* Check if this buffer will be moved and don't move it - * if we have moved too many buffers for this IB already. - * - * Note that this allows moving at least one buffer of - * any size, because it doesn't take the current "bo" - * into account. We don't want to disallow buffer moves - * completely. - */ - if ((lobj->allowed_domains & current_domain) != 0 && - (domain & current_domain) == 0 && /* will be moved */ - bytes_moved > bytes_moved_threshold) { - /* don't move it */ - domain = current_domain; - } + struct amdgpu_bo *bo = lobj->robj; + struct mm_struct *usermm; + uint32_t domain; - retry: - amdgpu_ttm_placement_from_domain(bo, domain); - initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - bytes_moved += atomic64_read(&adev->num_bytes_moved) - - initial_bytes_moved; - - if (unlikely(r)) { - if (r != -ERESTARTSYS && domain != lobj->allowed_domains) { - domain = lobj->allowed_domains; - goto retry; - } - return r; + usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); + if (usermm && usermm != current->mm) + return -EPERM; + + if (bo->pin_count) + continue; + + /* Avoid moving this one if we have moved too many buffers + * for this IB already. + * + * Note that this allows moving at least one buffer of + * any size, because it doesn't take the current "bo" + * into account. We don't want to disallow buffer moves + * completely. 
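The rewritten amdgpu_cs_list_validate() above keeps a running p->bytes_moved total and compares it against a per-submission threshold for each buffer: while under budget, a buffer may migrate to its preferred domain; once over budget, it stays wherever its allowed domains already permit. Because the check happens before the current buffer's size is counted, at least one move of any size always goes through. A small stand-alone sketch of that policy follows; the names, sizes and the notion of a "move" are illustrative only.

#include <stdint.h>
#include <stdio.h>

struct bo { uint64_t size; int in_preferred_domain; };

/* Each buffer is checked against the budget *before* its own size is added,
 * so one oversized move can always slip through, as the comment above notes. */
static uint64_t validate_list(struct bo *list, int n, uint64_t budget)
{
	uint64_t moved = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!list[i].in_preferred_domain && moved <= budget) {
			moved += list[i].size;		/* "migrate" to the preferred domain */
			list[i].in_preferred_domain = 1;
		}
		/* otherwise leave the buffer wherever its allowed domains have it */
	}
	return moved;
}

int main(void)
{
	struct bo list[] = { { 8 << 20, 0 }, { 64 << 20, 0 }, { 4 << 20, 0 } };
	uint64_t moved = validate_list(list, 3, 16 << 20);

	printf("moved %llu bytes against a 16 MiB budget\n", (unsigned long long)moved);
	return 0;
}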
+ */ + if (p->bytes_moved <= p->bytes_moved_threshold) + domain = bo->prefered_domains; + else + domain = bo->allowed_domains; + + retry: + amdgpu_ttm_placement_from_domain(bo, domain); + initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); + p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - + initial_bytes_moved; + + if (unlikely(r)) { + if (r != -ERESTARTSYS && domain != bo->allowed_domains) { + domain = bo->allowed_domains; + goto retry; } + return r; } - lobj->bo_va = amdgpu_vm_bo_find(vm, bo); } return 0; } -static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) +static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, + union drm_amdgpu_cs *cs) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - struct amdgpu_cs_buckets buckets; struct list_head duplicates; bool need_mmap_lock = false; - int i, r; + int r; + INIT_LIST_HEAD(&p->validated); + + p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); if (p->bo_list) { need_mmap_lock = p->bo_list->has_userptr; - amdgpu_cs_buckets_init(&buckets); - for (i = 0; i < p->bo_list->num_entries; i++) - amdgpu_cs_buckets_add(&buckets, &p->bo_list->array[i].tv.head, - p->bo_list->array[i].priority); - - amdgpu_cs_buckets_get_list(&buckets, &p->validated); + amdgpu_bo_list_get_list(p->bo_list, &p->validated); } INIT_LIST_HEAD(&duplicates); amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); - if (p->uf.bo) + if (p->job->uf.bo) list_add(&p->uf_entry.tv.head, &p->validated); if (need_mmap_lock) @@ -421,11 +369,27 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); - r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates); + p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); + p->bytes_moved = 0; + + r = amdgpu_cs_list_validate(p, &duplicates); + if (r) + goto error_validate; + + r = amdgpu_cs_list_validate(p, &p->validated); if (r) goto error_validate; - r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); + if (p->bo_list) { + struct amdgpu_vm *vm = &fpriv->vm; + unsigned i; + + for (i = 0; i < p->bo_list->num_entries; i++) { + struct amdgpu_bo *bo = p->bo_list->array[i].robj; + + p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo); + } + } error_validate: if (r) { @@ -447,7 +411,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) list_for_each_entry(e, &p->validated, tv.head) { struct reservation_object *resv = e->robj->tbo.resv; - r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp); + r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp); if (r) return r; @@ -510,11 +474,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); kfree(parser->chunks); - if (parser->ibs) - for (i = 0; i < parser->num_ibs; i++) - amdgpu_ib_free(parser->adev, &parser->ibs[i]); - kfree(parser->ibs); - amdgpu_bo_unref(&parser->uf.bo); + if (parser->job) + amdgpu_job_free(parser->job); amdgpu_bo_unref(&parser->uf_entry.robj); } @@ -530,7 +491,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, if (r) return r; - r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence); + r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence); if (r) return r; @@ -556,14 +517,14 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, return r; f = bo_va->last_pt_update; - r = 
amdgpu_sync_fence(adev, &p->ibs[0].sync, f); + r = amdgpu_sync_fence(adev, &p->job->sync, f); if (r) return r; } } - r = amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync); + r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync); if (amdgpu_vm_debug && p->bo_list) { /* Invalidate all BOs to test for userspace bugs */ @@ -581,29 +542,25 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, } static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, - struct amdgpu_cs_parser *parser) + struct amdgpu_cs_parser *p) { - struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; + struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_ring *ring; + struct amdgpu_ring *ring = p->job->ring; int i, r; - if (parser->num_ibs == 0) - return 0; - /* Only for UVD/VCE VM emulation */ - for (i = 0; i < parser->num_ibs; i++) { - ring = parser->ibs[i].ring; - if (ring->funcs->parse_cs) { - r = amdgpu_ring_parse_cs(ring, parser, i); + if (ring->funcs->parse_cs) { + for (i = 0; i < p->job->num_ibs; i++) { + r = amdgpu_ring_parse_cs(ring, p, i); if (r) return r; } } - r = amdgpu_bo_vm_update_pte(parser, vm); + r = amdgpu_bo_vm_update_pte(p, vm); if (!r) - amdgpu_cs_sync_rings(parser); + amdgpu_cs_sync_rings(p); return r; } @@ -626,14 +583,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, int i, j; int r; - for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) { + for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) { struct amdgpu_cs_chunk *chunk; struct amdgpu_ib *ib; struct drm_amdgpu_cs_chunk_ib *chunk_ib; struct amdgpu_ring *ring; chunk = &parser->chunks[i]; - ib = &parser->ibs[j]; + ib = &parser->job->ibs[j]; chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata; if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB) @@ -645,6 +602,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, if (r) return r; + if (parser->job->ring && parser->job->ring != ring) + return -EINVAL; + + parser->job->ring = ring; + if (ring->funcs->parse_cs) { struct amdgpu_bo_va_mapping *m; struct amdgpu_bo *aobj = NULL; @@ -673,7 +635,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE; kptr += chunk_ib->va_start - offset; - r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib); + r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib); if (r) { DRM_ERROR("Failed to get ib !\n"); return r; @@ -682,7 +644,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); amdgpu_bo_kunmap(aobj); } else { - r = amdgpu_ib_get(ring, vm, 0, ib); + r = amdgpu_ib_get(adev, vm, 0, ib); if (r) { DRM_ERROR("Failed to get ib !\n"); return r; @@ -697,15 +659,12 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, j++; } - if (!parser->num_ibs) - return 0; - /* add GDS resources to first IB */ if (parser->bo_list) { struct amdgpu_bo *gds = parser->bo_list->gds_obj; struct amdgpu_bo *gws = parser->bo_list->gws_obj; struct amdgpu_bo *oa = parser->bo_list->oa_obj; - struct amdgpu_ib *ib = &parser->ibs[0]; + struct amdgpu_ib *ib = &parser->job->ibs[0]; if (gds) { ib->gds_base = amdgpu_bo_gpu_offset(gds); @@ -721,15 +680,15 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, } } /* wrap the last IB with user fence */ - if (parser->uf.bo) { - struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1]; + if (parser->job->uf.bo) { + struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1]; /* UVD & VCE fw doesn't support user 
fences */ - if (ib->ring->type == AMDGPU_RING_TYPE_UVD || - ib->ring->type == AMDGPU_RING_TYPE_VCE) + if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD || + parser->job->ring->type == AMDGPU_RING_TYPE_VCE) return -EINVAL; - ib->user = &parser->uf; + ib->user = &parser->job->uf; } return 0; @@ -739,14 +698,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, struct amdgpu_cs_parser *p) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - struct amdgpu_ib *ib; int i, j, r; - if (!p->num_ibs) - return 0; - - /* Add dependencies to first IB */ - ib = &p->ibs[0]; for (i = 0; i < p->nchunks; ++i) { struct drm_amdgpu_cs_chunk_dep *deps; struct amdgpu_cs_chunk *chunk; @@ -784,7 +737,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, return r; } else if (fence) { - r = amdgpu_sync_fence(adev, &ib->sync, fence); + r = amdgpu_sync_fence(adev, &p->job->sync, + fence); fence_put(fence); amdgpu_ctx_put(ctx); if (r) @@ -796,15 +750,36 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, return 0; } -static int amdgpu_cs_free_job(struct amdgpu_job *job) +static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, + union drm_amdgpu_cs *cs) { - int i; - if (job->ibs) - for (i = 0; i < job->num_ibs; i++) - amdgpu_ib_free(job->adev, &job->ibs[i]); - kfree(job->ibs); - if (job->uf.bo) - amdgpu_bo_unref(&job->uf.bo); + struct amdgpu_ring *ring = p->job->ring; + struct amd_sched_fence *fence; + struct amdgpu_job *job; + + job = p->job; + p->job = NULL; + + job->base.sched = &ring->sched; + job->base.s_entity = &p->ctx->rings[ring->idx].entity; + job->owner = p->filp; + + fence = amd_sched_fence_create(job->base.s_entity, p->filp); + if (!fence) { + amdgpu_job_free(job); + return -ENOMEM; + } + + job->base.s_fence = fence; + p->fence = fence_get(&fence->base); + + cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, + &fence->base); + job->ibs[job->num_ibs - 1].sequence = cs->out.handle; + + trace_amdgpu_cs_ioctl(job); + amd_sched_entity_push_job(&job->base); + return 0; } @@ -829,7 +804,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_handle_lockup(adev, r); return r; } - r = amdgpu_cs_parser_relocs(&parser); + r = amdgpu_cs_parser_bos(&parser, data); if (r == -ENOMEM) DRM_ERROR("Not enough memory for command submission!\n"); else if (r && r != -ERESTARTSYS) @@ -848,68 +823,14 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r) goto out; - for (i = 0; i < parser.num_ibs; i++) + for (i = 0; i < parser.job->num_ibs; i++) trace_amdgpu_cs(&parser, i); r = amdgpu_cs_ib_vm_chunk(adev, &parser); if (r) goto out; - if (amdgpu_enable_scheduler && parser.num_ibs) { - struct amdgpu_ring * ring = parser.ibs->ring; - struct amd_sched_fence *fence; - struct amdgpu_job *job; - - job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); - if (!job) { - r = -ENOMEM; - goto out; - } - - job->base.sched = &ring->sched; - job->base.s_entity = &parser.ctx->rings[ring->idx].entity; - job->adev = parser.adev; - job->owner = parser.filp; - job->free_job = amdgpu_cs_free_job; - - job->ibs = parser.ibs; - job->num_ibs = parser.num_ibs; - parser.ibs = NULL; - parser.num_ibs = 0; - - if (job->ibs[job->num_ibs - 1].user) { - job->uf = parser.uf; - job->ibs[job->num_ibs - 1].user = &job->uf; - parser.uf.bo = NULL; - } - - fence = amd_sched_fence_create(job->base.s_entity, - parser.filp); - if (!fence) { - r = -ENOMEM; - amdgpu_cs_free_job(job); - kfree(job); - goto out; - } - job->base.s_fence = fence; - parser.fence = 
fence_get(&fence->base); - - cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring, - &fence->base); - job->ibs[job->num_ibs - 1].sequence = cs->out.handle; - - trace_amdgpu_cs_ioctl(job); - amd_sched_entity_push_job(&job->base); - - } else { - struct amdgpu_fence *fence; - - r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs, - parser.filp); - fence = parser.ibs[parser.num_ibs - 1].fence; - parser.fence = fence_get(&fence->base); - cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence; - } + r = amdgpu_cs_submit(&parser, cs); out: amdgpu_cs_parser_fini(&parser, r, reserved_buffers); @@ -980,30 +901,36 @@ struct amdgpu_bo_va_mapping * amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct amdgpu_bo **bo) { - struct amdgpu_bo_list_entry *reloc; struct amdgpu_bo_va_mapping *mapping; + unsigned i; + + if (!parser->bo_list) + return NULL; addr /= AMDGPU_GPU_PAGE_SIZE; - list_for_each_entry(reloc, &parser->validated, tv.head) { - if (!reloc->bo_va) + for (i = 0; i < parser->bo_list->num_entries; i++) { + struct amdgpu_bo_list_entry *lobj; + + lobj = &parser->bo_list->array[i]; + if (!lobj->bo_va) continue; - list_for_each_entry(mapping, &reloc->bo_va->valids, list) { + list_for_each_entry(mapping, &lobj->bo_va->valids, list) { if (mapping->it.start > addr || addr > mapping->it.last) continue; - *bo = reloc->bo_va->bo; + *bo = lobj->bo_va->bo; return mapping; } - list_for_each_entry(mapping, &reloc->bo_va->invalids, list) { + list_for_each_entry(mapping, &lobj->bo_va->invalids, list) { if (mapping->it.start > addr || addr > mapping->it.last) continue; - *bo = reloc->bo_va->bo; + *bo = lobj->bo_va->bo; return mapping; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 17d1fb12128a..17e13621fae9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -25,8 +25,7 @@ #include <drm/drmP.h> #include "amdgpu.h" -int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri, - struct amdgpu_ctx *ctx) +static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) { unsigned i, j; int r; @@ -35,44 +34,38 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri, ctx->adev = adev; kref_init(&ctx->refcount); spin_lock_init(&ctx->ring_lock); - ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs * - AMDGPU_MAX_RINGS, GFP_KERNEL); + ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS, + sizeof(struct fence*), GFP_KERNEL); if (!ctx->fences) return -ENOMEM; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { ctx->rings[i].sequence = 1; - ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) * - amdgpu_sched_jobs * i; + ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i]; } - if (amdgpu_enable_scheduler) { - /* create context entity for each ring */ - for (i = 0; i < adev->num_rings; i++) { - struct amd_sched_rq *rq; - if (pri >= AMD_SCHED_MAX_PRIORITY) { - kfree(ctx->fences); - return -EINVAL; - } - rq = &adev->rings[i]->sched.sched_rq[pri]; - r = amd_sched_entity_init(&adev->rings[i]->sched, - &ctx->rings[i].entity, - rq, amdgpu_sched_jobs); - if (r) - break; - } - - if (i < adev->num_rings) { - for (j = 0; j < i; j++) - amd_sched_entity_fini(&adev->rings[j]->sched, - &ctx->rings[j].entity); - kfree(ctx->fences); - return r; - } + /* create context entity for each ring */ + for (i = 0; i < adev->num_rings; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + struct amd_sched_rq *rq; + + rq = 
&ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; + r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity, + rq, amdgpu_sched_jobs); + if (r) + break; + } + + if (i < adev->num_rings) { + for (j = 0; j < i; j++) + amd_sched_entity_fini(&adev->rings[j]->sched, + &ctx->rings[j].entity); + kfree(ctx->fences); + return r; } return 0; } -void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) +static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) { struct amdgpu_device *adev = ctx->adev; unsigned i, j; @@ -85,11 +78,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) fence_put(ctx->rings[i].fences[j]); kfree(ctx->fences); - if (amdgpu_enable_scheduler) { - for (i = 0; i < adev->num_rings; i++) - amd_sched_entity_fini(&adev->rings[i]->sched, - &ctx->rings[i].entity); - } + for (i = 0; i < adev->num_rings; i++) + amd_sched_entity_fini(&adev->rings[i]->sched, + &ctx->rings[i].entity); } static int amdgpu_ctx_alloc(struct amdgpu_device *adev, @@ -112,7 +103,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev, return r; } *id = (uint32_t)r; - r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx); + r = amdgpu_ctx_init(adev, ctx); if (r) { idr_remove(&mgr->ctx_handles, *id); *id = 0; @@ -200,18 +191,18 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, id = args->in.ctx_id; switch (args->in.op) { - case AMDGPU_CTX_OP_ALLOC_CTX: - r = amdgpu_ctx_alloc(adev, fpriv, &id); - args->out.alloc.ctx_id = id; - break; - case AMDGPU_CTX_OP_FREE_CTX: - r = amdgpu_ctx_free(fpriv, id); - break; - case AMDGPU_CTX_OP_QUERY_STATE: - r = amdgpu_ctx_query(adev, fpriv, id, &args->out); - break; - default: - return -EINVAL; + case AMDGPU_CTX_OP_ALLOC_CTX: + r = amdgpu_ctx_alloc(adev, fpriv, &id); + args->out.alloc.ctx_id = id; + break; + case AMDGPU_CTX_OP_FREE_CTX: + r = amdgpu_ctx_free(fpriv, id); + break; + case AMDGPU_CTX_OP_QUERY_STATE: + r = amdgpu_ctx_query(adev, fpriv, id, &args->out); + break; + default: + return -EINVAL; } return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 65531463f88e..db20d2783def 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -636,31 +636,6 @@ bool amdgpu_card_posted(struct amdgpu_device *adev) } /** - * amdgpu_boot_test_post_card - check and possibly initialize the hw - * - * @adev: amdgpu_device pointer - * - * Check if the asic is initialized and if not, attempt to initialize - * it (all asics). - * Returns true if initialized or false if not. - */ -bool amdgpu_boot_test_post_card(struct amdgpu_device *adev) -{ - if (amdgpu_card_posted(adev)) - return true; - - if (adev->bios) { - DRM_INFO("GPU not posted. 
posting now...\n"); - if (adev->is_atom_bios) - amdgpu_atom_asic_init(adev->mode_info.atom_context); - return true; - } else { - dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); - return false; - } -} - -/** * amdgpu_dummy_page_init - init dummy page used by the driver * * @adev: amdgpu_device pointer @@ -959,12 +934,6 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_sched_jobs); amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); } - /* vramlimit must be a power of two */ - if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) { - dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n", - amdgpu_vram_limit); - amdgpu_vram_limit = 0; - } if (amdgpu_gart_size != -1) { /* gtt size must be power of two and greater or equal to 32M */ @@ -1434,7 +1403,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->mman.buffer_funcs = NULL; adev->mman.buffer_funcs_ring = NULL; adev->vm_manager.vm_pte_funcs = NULL; - adev->vm_manager.vm_pte_funcs_ring = NULL; + adev->vm_manager.vm_pte_num_rings = 0; adev->gart.gart_funcs = NULL; adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); @@ -1455,9 +1424,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, /* mutex initialization are all done here so we * can recall function without having locking issues */ - mutex_init(&adev->ring_lock); + mutex_init(&adev->vm_manager.lock); atomic_set(&adev->irq.ih.lock, 0); - mutex_init(&adev->gem.mutex); mutex_init(&adev->pm.mutex); mutex_init(&adev->gfx.gpu_clock_mutex); mutex_init(&adev->srbm_mutex); @@ -1531,8 +1499,13 @@ int amdgpu_device_init(struct amdgpu_device *adev, return r; } + /* See if the asic supports SR-IOV */ + adev->virtualization.supports_sr_iov = + amdgpu_atombios_has_gpu_virtualization_table(adev); + /* Post card if necessary */ - if (!amdgpu_card_posted(adev)) { + if (!amdgpu_card_posted(adev) || + adev->virtualization.supports_sr_iov) { if (!adev->bios) { dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); return -EINVAL; @@ -1577,11 +1550,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, return r; } - r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx); - if (r) { - dev_err(adev->dev, "failed to create kernel context (%d).\n", r); - return r; - } r = amdgpu_ib_ring_tests(adev); if (r) DRM_ERROR("ib ring test failed (%d).\n", r); @@ -1645,7 +1613,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->shutdown = true; /* evict vram memory */ amdgpu_bo_evict_vram(adev); - amdgpu_ctx_fini(&adev->kernel_ctx); amdgpu_ib_pool_fini(adev); amdgpu_fence_driver_fini(adev); amdgpu_fbdev_fini(adev); @@ -1889,6 +1856,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) retry: r = amdgpu_asic_reset(adev); + /* post card */ + amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (!r) { dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); r = amdgpu_resume(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index acd066d0a805..2cb53c24dec0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -35,32 +35,30 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> -static void amdgpu_flip_wait_fence(struct amdgpu_device *adev, - struct fence **f) +static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb) { - struct amdgpu_fence *fence; - long r; + struct amdgpu_flip_work *work = + container_of(cb, struct amdgpu_flip_work, cb); - if (*f == NULL) - return; + 
fence_put(f); + schedule_work(&work->flip_work); +} - fence = to_amdgpu_fence(*f); - if (fence) { - r = fence_wait(&fence->base, false); - if (r == -EDEADLK) - r = amdgpu_gpu_reset(adev); - } else - r = fence_wait(*f, false); +static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, + struct fence **f) +{ + struct fence *fence= *f; - if (r) - DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r); + if (fence == NULL) + return false; - /* We continue with the page flip even if we failed to wait on - * the fence, otherwise the DRM core and userspace will be - * confused about which BO the CRTC is scanning out - */ - fence_put(*f); *f = NULL; + + if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) + return true; + + fence_put(*f); + return false; } static void amdgpu_flip_work_func(struct work_struct *__work) @@ -76,9 +74,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work) int vpos, hpos, stat, min_udelay; struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; - amdgpu_flip_wait_fence(adev, &work->excl); + if (amdgpu_flip_handle_fence(work, &work->excl)) + return; + for (i = 0; i < work->shared_count; ++i) - amdgpu_flip_wait_fence(adev, &work->shared[i]); + if (amdgpu_flip_handle_fence(work, &work->shared[i])) + return; /* We borrow the event spin lock for protecting flip_status */ spin_lock_irqsave(&crtc->dev->event_lock, flags); @@ -118,12 +119,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work) spin_lock_irqsave(&crtc->dev->event_lock, flags); }; - /* do the flip (mmio) */ - adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); /* set the flip status */ amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + /* Do the flip (mmio) */ + adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); } /* @@ -242,7 +243,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, /* update crtc fb */ crtc->primary->fb = fb; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - queue_work(amdgpu_crtc->pflip_queue, &work->flip_work); + amdgpu_flip_work_func(&work->flip_work); return 0; vblank_cleanup: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 9c1af8976bef..01b4fd6115c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -69,7 +69,6 @@ int amdgpu_dpm = -1; int amdgpu_smc_load_fw = 1; int amdgpu_aspm = -1; int amdgpu_runtime_pm = -1; -int amdgpu_hard_reset = 0; unsigned amdgpu_ip_block_mask = 0xffffffff; int amdgpu_bapm = -1; int amdgpu_deep_color = 0; @@ -78,10 +77,8 @@ int amdgpu_vm_block_size = -1; int amdgpu_vm_fault_stop = 0; int amdgpu_vm_debug = 0; int amdgpu_exp_hw_support = 0; -int amdgpu_enable_scheduler = 1; int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; -int amdgpu_enable_semaphores = 0; int amdgpu_powerplay = -1; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); @@ -126,9 +123,6 @@ module_param_named(aspm, amdgpu_aspm, int, 0444); MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)"); module_param_named(runpm, amdgpu_runtime_pm, int, 0444); -MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); -module_param_named(hard_reset, amdgpu_hard_reset, int, 0444); - MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))"); module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444); @@ -153,18 
+147,12 @@ module_param_named(vm_debug, amdgpu_vm_debug, int, 0644); MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); -MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)"); -module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444); - MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)"); module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444); MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); -MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))"); -module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644); - #ifdef CONFIG_DRM_AMD_POWERPLAY MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))"); module_param_named(powerplay, amdgpu_powerplay, int, 0444); @@ -322,6 +310,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENODEV; } + /* + * Initialize amdkfd before starting radeon. If it was not loaded yet, + * defer radeon probing + */ + ret = amdgpu_amdkfd_init(); + if (ret == -EPROBE_DEFER) + return ret; + /* Get rid of things like offb */ ret = amdgpu_kick_out_firmware_fb(pdev); if (ret) @@ -564,8 +560,6 @@ static int __init amdgpu_init(void) driver->num_ioctls = amdgpu_max_kms_ioctl; amdgpu_register_atpx_handler(); - amdgpu_amdkfd_init(); - /* let modprobe override vga console setting */ return drm_pci_init(driver, pdriver); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 3671f9f220bd..97db196dc6f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -107,7 +107,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, if ((*fence) == NULL) { return -ENOMEM; } - (*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx]; + (*fence)->seq = ++ring->fence_drv.sync_seq; (*fence)->ring = ring; (*fence)->owner = owner; fence_init(&(*fence)->base, &amdgpu_fence_ops, @@ -171,7 +171,7 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring) */ last_seq = atomic64_read(&ring->fence_drv.last_seq); do { - last_emitted = ring->fence_drv.sync_seq[ring->idx]; + last_emitted = ring->fence_drv.sync_seq; seq = amdgpu_fence_read(ring); seq |= last_seq & 0xffffffff00000000LL; if (seq < last_seq) { @@ -260,34 +260,28 @@ static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq) } /* - * amdgpu_ring_wait_seq_timeout - wait for seq of the specific ring to signal + * amdgpu_ring_wait_seq - wait for seq of the specific ring to signal * @ring: ring to wait on for the seq number * @seq: seq number wait for * * return value: * 0: seq signaled, and gpu not hang - * -EDEADL: GPU hang detected * -EINVAL: some paramter is not valid */ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq) { - bool signaled = false; - BUG_ON(!ring); - if (seq > ring->fence_drv.sync_seq[ring->idx]) + if (seq > ring->fence_drv.sync_seq) return -EINVAL; if (atomic64_read(&ring->fence_drv.last_seq) >= seq) return 0; amdgpu_fence_schedule_fallback(ring); - wait_event(ring->fence_drv.fence_queue, ( - (signaled = amdgpu_fence_seq_signaled(ring, seq)))); + wait_event(ring->fence_drv.fence_queue, + amdgpu_fence_seq_signaled(ring, seq)); - if 
(signaled) - return 0; - else - return -EDEADLK; + return 0; } /** @@ -304,7 +298,7 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring) { uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL; - if (seq >= ring->fence_drv.sync_seq[ring->idx]) + if (seq >= ring->fence_drv.sync_seq) return -ENOENT; return amdgpu_fence_ring_wait_seq(ring, seq); @@ -322,7 +316,7 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring) */ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) { - uint64_t seq = ring->fence_drv.sync_seq[ring->idx]; + uint64_t seq = ring->fence_drv.sync_seq; if (!seq) return 0; @@ -347,7 +341,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) * but it's ok to report slightly wrong fence count here. */ amdgpu_fence_process(ring); - emitted = ring->fence_drv.sync_seq[ring->idx] + emitted = ring->fence_drv.sync_seq - atomic64_read(&ring->fence_drv.last_seq); /* to avoid 32bits warp around */ if (emitted > 0x10000000) @@ -357,68 +351,6 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) } /** - * amdgpu_fence_need_sync - do we need a semaphore - * - * @fence: amdgpu fence object - * @dst_ring: which ring to check against - * - * Check if the fence needs to be synced against another ring - * (all asics). If so, we need to emit a semaphore. - * Returns true if we need to sync with another ring, false if - * not. - */ -bool amdgpu_fence_need_sync(struct amdgpu_fence *fence, - struct amdgpu_ring *dst_ring) -{ - struct amdgpu_fence_driver *fdrv; - - if (!fence) - return false; - - if (fence->ring == dst_ring) - return false; - - /* we are protected by the ring mutex */ - fdrv = &dst_ring->fence_drv; - if (fence->seq <= fdrv->sync_seq[fence->ring->idx]) - return false; - - return true; -} - -/** - * amdgpu_fence_note_sync - record the sync point - * - * @fence: amdgpu fence object - * @dst_ring: which ring to check against - * - * Note the sequence number at which point the fence will - * be synced with the requested ring (all asics). - */ -void amdgpu_fence_note_sync(struct amdgpu_fence *fence, - struct amdgpu_ring *dst_ring) -{ - struct amdgpu_fence_driver *dst, *src; - unsigned i; - - if (!fence) - return; - - if (fence->ring == dst_ring) - return; - - /* we are protected by the ring mutex */ - src = &fence->ring->fence_drv; - dst = &dst_ring->fence_drv; - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - if (i == dst_ring->idx) - continue; - - dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]); - } -} - -/** * amdgpu_fence_driver_start_ring - make the fence driver * ready for use on the requested ring. * @@ -471,13 +403,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, */ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) { - int i, r; + long timeout; + int r; ring->fence_drv.cpu_addr = NULL; ring->fence_drv.gpu_addr = 0; - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) - ring->fence_drv.sync_seq[i] = 0; - + ring->fence_drv.sync_seq = 0; atomic64_set(&ring->fence_drv.last_seq, 0); ring->fence_drv.initialized = false; @@ -486,26 +417,24 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) init_waitqueue_head(&ring->fence_drv.fence_queue); - if (amdgpu_enable_scheduler) { - long timeout = msecs_to_jiffies(amdgpu_lockup_timeout); - if (timeout == 0) { - /* - * FIXME: - * Delayed workqueue cannot use it directly, - * so the scheduler will not use delayed workqueue if - * MAX_SCHEDULE_TIMEOUT is set. - * Currently keep it simple and silly. 
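With the per-ring sync_seq[] array collapsed into a single 64-bit sync_seq, the fence driver compares that counter against last_seq, which is rebuilt from the 32-bit value the hardware writes back (seq |= last_seq & 0xffffffff00000000LL plus a wrap check, as in amdgpu_fence_activity() earlier in this hunk). Below is a simplified userspace sketch of widening a 32-bit hardware sequence into the 64-bit space; the wrap fix-up shown here is illustrative rather than the driver's exact code.

#include <stdint.h>
#include <stdio.h>

/* Widen a 32-bit hardware fence value into the driver's 64-bit sequence
 * space, assuming the counter never advances by more than 2^32 between reads. */
static uint64_t extend_seq(uint64_t last_seq, uint32_t hw_seq)
{
	uint64_t seq = (uint64_t)hw_seq | (last_seq & 0xffffffff00000000ULL);

	if (seq < last_seq)		/* the 32-bit value wrapped since the last read */
		seq += 0x100000000ULL;
	return seq;
}

int main(void)
{
	/* last known 0x1fffffff0; hardware now reports 0x10, i.e. it wrapped */
	printf("0x%llx\n", (unsigned long long)extend_seq(0x1fffffff0ULL, 0x10));
	return 0;
}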
- */ - timeout = MAX_SCHEDULE_TIMEOUT; - } - r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, - amdgpu_sched_hw_submission, - timeout, ring->name); - if (r) { - DRM_ERROR("Failed to create scheduler on ring %s.\n", - ring->name); - return r; - } + timeout = msecs_to_jiffies(amdgpu_lockup_timeout); + if (timeout == 0) { + /* + * FIXME: + * Delayed workqueue cannot use it directly, + * so the scheduler will not use delayed workqueue if + * MAX_SCHEDULE_TIMEOUT is set. + * Currently keep it simple and silly. + */ + timeout = MAX_SCHEDULE_TIMEOUT; + } + r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, + amdgpu_sched_hw_submission, + timeout, ring->name); + if (r) { + DRM_ERROR("Failed to create scheduler on ring %s.\n", + ring->name); + return r; } return 0; @@ -552,7 +481,6 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) if (atomic_dec_and_test(&amdgpu_fence_slab_ref)) kmem_cache_destroy(amdgpu_fence_slab); - mutex_lock(&adev->ring_lock); for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; @@ -570,7 +498,6 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) del_timer_sync(&ring->fence_drv.fallback_timer); ring->fence_drv.initialized = false; } - mutex_unlock(&adev->ring_lock); } /** @@ -585,7 +512,6 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev) { int i, r; - mutex_lock(&adev->ring_lock); for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring || !ring->fence_drv.initialized) @@ -602,7 +528,6 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); } - mutex_unlock(&adev->ring_lock); } /** @@ -621,7 +546,6 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev) { int i; - mutex_lock(&adev->ring_lock); for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring || !ring->fence_drv.initialized) @@ -631,7 +555,6 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev) amdgpu_irq_get(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); } - mutex_unlock(&adev->ring_lock); } /** @@ -651,7 +574,7 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev) if (!ring || !ring->fence_drv.initialized) continue; - amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]); + amdgpu_fence_write(ring, ring->fence_drv.sync_seq); } } @@ -781,7 +704,7 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - int i, j; + int i; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; @@ -794,28 +717,38 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) seq_printf(m, "Last signaled fence 0x%016llx\n", (unsigned long long)atomic64_read(&ring->fence_drv.last_seq)); seq_printf(m, "Last emitted 0x%016llx\n", - ring->fence_drv.sync_seq[i]); - - for (j = 0; j < AMDGPU_MAX_RINGS; ++j) { - struct amdgpu_ring *other = adev->rings[j]; - if (i != j && other && other->fence_drv.initialized && - ring->fence_drv.sync_seq[j]) - seq_printf(m, "Last sync to ring %d 0x%016llx\n", - j, ring->fence_drv.sync_seq[j]); - } + ring->fence_drv.sync_seq); } return 0; } +/** + * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset + * + * Manually trigger a gpu reset at the next fence wait. 
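The debugfs file list below grows from one entry to two: alongside amdgpu_fence_info, reading the new amdgpu_gpu_reset file calls amdgpu_gpu_reset(). Conceptually it is a name-to-show-callback table walked at registration time; the following userspace stand-in shows only that table idea and is not the DRM debugfs API.

#include <stdio.h>

struct info_entry {
	const char *name;
	int (*show)(void);
};

static int fence_info(void) { printf("fence state dump\n"); return 0; }
static int gpu_reset(void)  { printf("gpu reset requested\n"); return 0; }

static const struct info_entry debug_entries[] = {
	{ "amdgpu_fence_info", fence_info },
	{ "amdgpu_gpu_reset",  gpu_reset  },
};

int main(void)
{
	unsigned i;

	/* debugfs would create one file per entry and call ->show() on read;
	 * here we simply walk the table once. */
	for (i = 0; i < sizeof(debug_entries) / sizeof(debug_entries[0]); i++)
		debug_entries[i].show();
	return 0;
}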
+ */ +static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = dev->dev_private; + + seq_printf(m, "gpu reset\n"); + amdgpu_gpu_reset(adev); + + return 0; +} + static struct drm_info_list amdgpu_debugfs_fence_list[] = { {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL}, + {"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL} }; #endif int amdgpu_debugfs_fence_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) - return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1); + return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2); #else return 0; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7380f782cd14..2e26a517f2d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -83,24 +83,32 @@ retry: return r; } *obj = &robj->gem_base; - robj->pid = task_pid_nr(current); - - mutex_lock(&adev->gem.mutex); - list_add_tail(&robj->list, &adev->gem.objects); - mutex_unlock(&adev->gem.mutex); return 0; } -int amdgpu_gem_init(struct amdgpu_device *adev) +void amdgpu_gem_force_release(struct amdgpu_device *adev) { - INIT_LIST_HEAD(&adev->gem.objects); - return 0; -} + struct drm_device *ddev = adev->ddev; + struct drm_file *file; -void amdgpu_gem_fini(struct amdgpu_device *adev) -{ - amdgpu_bo_force_delete(adev); + mutex_lock(&ddev->struct_mutex); + + list_for_each_entry(file, &ddev->filelist, lhead) { + struct drm_gem_object *gobj; + int handle; + + WARN_ONCE(1, "Still active user space clients!\n"); + spin_lock(&file->table_lock); + idr_for_each_entry(&file->object_idr, gobj, handle) { + WARN_ONCE(1, "And also active allocations!\n"); + drm_gem_object_unreference(gobj); + } + idr_destroy(&file->object_idr); + spin_unlock(&file->table_lock); + } + + mutex_unlock(&ddev->struct_mutex); } /* @@ -252,6 +260,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, goto handle_lockup; bo = gem_to_amdgpu_bo(gobj); + bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT; + bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); if (r) goto release_object; @@ -308,7 +318,7 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp, return -ENOENT; } robj = gem_to_amdgpu_bo(gobj); - if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) || + if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { drm_gem_object_unreference_unlocked(gobj); return -EPERM; @@ -628,7 +638,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, info.bo_size = robj->gem_base.size; info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; - info.domains = robj->initial_domain; + info.domains = robj->prefered_domains; info.domain_flags = robj->flags; amdgpu_bo_unreserve(robj); if (copy_to_user(out, &info, sizeof(info))) @@ -636,14 +646,18 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, break; } case AMDGPU_GEM_OP_SET_PLACEMENT: - if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) { + if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) { r = -EPERM; amdgpu_bo_unreserve(robj); break; } - robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT | - AMDGPU_GEM_DOMAIN_CPU); + robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU); + 
robj->allowed_domains = robj->prefered_domains; + if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) + robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; + amdgpu_bo_unreserve(robj); break; default: @@ -688,38 +702,73 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, } #if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) +{ + struct drm_gem_object *gobj = ptr; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); + struct seq_file *m = data; + + unsigned domain; + const char *placement; + unsigned pin_count; + + domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); + switch (domain) { + case AMDGPU_GEM_DOMAIN_VRAM: + placement = "VRAM"; + break; + case AMDGPU_GEM_DOMAIN_GTT: + placement = " GTT"; + break; + case AMDGPU_GEM_DOMAIN_CPU: + default: + placement = " CPU"; + break; + } + seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx", + id, amdgpu_bo_size(bo), placement, + amdgpu_bo_gpu_offset(bo)); + + pin_count = ACCESS_ONCE(bo->pin_count); + if (pin_count) + seq_printf(m, " pin count %d", pin_count); + seq_printf(m, "\n"); + + return 0; +} + static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - struct amdgpu_bo *rbo; - unsigned i = 0; + struct drm_file *file; + int r; - mutex_lock(&adev->gem.mutex); - list_for_each_entry(rbo, &adev->gem.objects, list) { - unsigned domain; - const char *placement; + r = mutex_lock_interruptible(&dev->struct_mutex); + if (r) + return r; - domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type); - switch (domain) { - case AMDGPU_GEM_DOMAIN_VRAM: - placement = "VRAM"; - break; - case AMDGPU_GEM_DOMAIN_GTT: - placement = " GTT"; - break; - case AMDGPU_GEM_DOMAIN_CPU: - default: - placement = " CPU"; - break; - } - seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n", - i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20, - placement, (unsigned long)rbo->pid); - i++; + list_for_each_entry(file, &dev->filelist, lhead) { + struct task_struct *task; + + /* + * Although we have a valid reference on file->pid, that does + * not guarantee that the task_struct who called get_pid() is + * still alive (e.g. get_pid(current) => fork() => exit()). + * Therefore, we need to protect this ->comm access using RCU. + */ + rcu_read_lock(); + task = pid_task(file->pid, PIDTYPE_PID); + seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid), + task ? task->comm : "<unknown>"); + rcu_read_unlock(); + + spin_lock(&file->table_lock); + idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m); + spin_unlock(&file->table_lock); } - mutex_unlock(&adev->gem.mutex); + + mutex_unlock(&dev->struct_mutex); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 9e25edafa721..b5bdd5d59b58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -55,10 +55,9 @@ static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev); * suballocator. * Returns 0 on success, error on failure. 
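AMDGPU_GEM_OP_SET_PLACEMENT above now maintains two masks per buffer: the user's prefered_domains and a derived allowed_domains that silently gains GTT as a fallback when VRAM alone was requested (the same rule appears later in amdgpu_bo_create_restricted()). A sketch of that derivation follows, using illustrative bit values rather than the real AMDGPU_GEM_DOMAIN_* constants.

#include <stdio.h>

#define DOMAIN_CPU  (1u << 0)	/* illustrative bits, not the real definitions */
#define DOMAIN_GTT  (1u << 1)
#define DOMAIN_VRAM (1u << 2)

struct bo_domains { unsigned preferred, allowed; };

static struct bo_domains set_placement(unsigned requested)
{
	struct bo_domains d;

	d.preferred = requested & (DOMAIN_VRAM | DOMAIN_GTT | DOMAIN_CPU);
	d.allowed = d.preferred;
	if (d.allowed == DOMAIN_VRAM)	/* VRAM-only request: keep GTT as a fallback */
		d.allowed |= DOMAIN_GTT;
	return d;
}

int main(void)
{
	struct bo_domains d = set_placement(DOMAIN_VRAM);

	printf("preferred 0x%x, allowed 0x%x\n", d.preferred, d.allowed);
	return 0;
}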
*/ -int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, +int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned size, struct amdgpu_ib *ib) { - struct amdgpu_device *adev = ring->adev; int r; if (size) { @@ -75,9 +74,6 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); } - amdgpu_sync_create(&ib->sync); - - ib->ring = ring; ib->vm = vm; return 0; @@ -93,7 +89,6 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, */ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib) { - amdgpu_sync_free(adev, &ib->sync, &ib->fence->base); amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base); if (ib->fence) fence_put(&ib->fence->base); @@ -106,6 +101,7 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib) * @num_ibs: number of IBs to schedule * @ibs: IB objects to schedule * @owner: owner for creating the fences + * @f: fence created during this submission * * Schedule an IB on the associated ring (all asics). * Returns 0 on success, error on failure. @@ -120,11 +116,13 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib) * a CONST_IB), it will be put on the ring prior to the DE IB. Prior * to SI there was just a DE IB. */ -int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, - struct amdgpu_ib *ibs, void *owner) +int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, + struct amdgpu_ib *ibs, void *owner, + struct fence *last_vm_update, + struct fence **f) { + struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; - struct amdgpu_ring *ring; struct amdgpu_ctx *ctx, *old_ctx; struct amdgpu_vm *vm; unsigned i; @@ -133,7 +131,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, if (num_ibs == 0) return -EINVAL; - ring = ibs->ring; ctx = ibs->ctx; vm = ibs->vm; @@ -141,36 +138,21 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, dev_err(adev->dev, "couldn't schedule ib\n"); return -EINVAL; } - r = amdgpu_sync_wait(&ibs->sync); - if (r) { - dev_err(adev->dev, "IB sync failed (%d).\n", r); - return r; - } - r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs); - if (r) { - dev_err(adev->dev, "scheduling IB failed (%d).\n", r); - return r; - } - if (vm) { - /* grab a vm id if necessary */ - r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync); - if (r) { - amdgpu_ring_unlock_undo(ring); - return r; - } + if (vm && !ibs->grabbed_vmid) { + dev_err(adev->dev, "VM IB without ID\n"); + return -EINVAL; } - r = amdgpu_sync_rings(&ibs->sync, ring); + r = amdgpu_ring_alloc(ring, 256 * num_ibs); if (r) { - amdgpu_ring_unlock_undo(ring); - dev_err(adev->dev, "failed to sync rings (%d)\n", r); + dev_err(adev->dev, "scheduling IB failed (%d).\n", r); return r; } if (vm) { /* do context switch */ - amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update); + amdgpu_vm_flush(ring, vm, last_vm_update); if (ring->funcs->emit_gds_switch) amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id, @@ -186,9 +168,9 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, for (i = 0; i < num_ibs; ++i) { ib = &ibs[i]; - if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) { + if (ib->ctx != ctx || ib->vm != vm) { ring->current_ctx = old_ctx; - amdgpu_ring_unlock_undo(ring); + amdgpu_ring_undo(ring); return -EINVAL; } amdgpu_ring_emit_ib(ring, ib); @@ -199,14 +181,10 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, if (r) { 
dev_err(adev->dev, "failed to emit fence (%d)\n", r); ring->current_ctx = old_ctx; - amdgpu_ring_unlock_undo(ring); + amdgpu_ring_undo(ring); return r; } - if (!amdgpu_enable_scheduler && ib->ctx) - ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring, - &ib->fence->base); - /* wrap the last IB with fence */ if (ib->user) { uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo); @@ -215,10 +193,10 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, AMDGPU_FENCE_FLAG_64BIT); } - if (ib->vm) - amdgpu_vm_fence(adev, ib->vm, &ib->fence->base); + if (f) + *f = fence_get(&ib->fence->base); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c new file mode 100644 index 000000000000..f29bbb96a881 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -0,0 +1,159 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
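amdgpu_ib_schedule() above now hands its fence back through an optional **f out parameter, taking an extra reference with fence_get() so the caller owns what it receives. The following is a simplified userspace model of that ownership convention; the refcounting is hand-rolled here and is not the kernel fence API.

#include <stdio.h>
#include <stdlib.h>

struct fence { int refcount; };

static struct fence *fence_get(struct fence *f) { f->refcount++; return f; }

static void fence_put(struct fence *f)
{
	if (--f->refcount == 0)
		free(f);
}

/* Submit "work" and optionally publish its fence: the caller gets its own
 * reference and is responsible for dropping it. */
static int submit(struct fence **out)
{
	struct fence *f = malloc(sizeof(*f));

	if (!f)
		return -1;
	f->refcount = 1;		/* reference owned by the submission itself */
	if (out)
		*out = fence_get(f);	/* extra reference handed to the caller */
	fence_put(f);			/* stand-in for the completion dropping its reference */
	return 0;
}

int main(void)
{
	struct fence *f = NULL;

	if (submit(&f) == 0 && f) {
		printf("caller still holds a valid fence reference\n");
		fence_put(f);
	}
	return 0;
}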
+ * + * + */ +#include <linux/kthread.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <drm/drmP.h> +#include "amdgpu.h" +#include "amdgpu_trace.h" + +int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, + struct amdgpu_job **job) +{ + size_t size = sizeof(struct amdgpu_job); + + if (num_ibs == 0) + return -EINVAL; + + size += sizeof(struct amdgpu_ib) * num_ibs; + + *job = kzalloc(size, GFP_KERNEL); + if (!*job) + return -ENOMEM; + + (*job)->adev = adev; + (*job)->ibs = (void *)&(*job)[1]; + (*job)->num_ibs = num_ibs; + + amdgpu_sync_create(&(*job)->sync); + + return 0; +} + +int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, + struct amdgpu_job **job) +{ + int r; + + r = amdgpu_job_alloc(adev, 1, job); + if (r) + return r; + + r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]); + if (r) + kfree(*job); + + return r; +} + +void amdgpu_job_free(struct amdgpu_job *job) +{ + unsigned i; + + for (i = 0; i < job->num_ibs; ++i) + amdgpu_ib_free(job->adev, &job->ibs[i]); + + amdgpu_bo_unref(&job->uf.bo); + amdgpu_sync_free(&job->sync); + kfree(job); +} + +int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, + struct amd_sched_entity *entity, void *owner, + struct fence **f) +{ + job->ring = ring; + job->base.sched = &ring->sched; + job->base.s_entity = entity; + job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); + if (!job->base.s_fence) + return -ENOMEM; + + *f = fence_get(&job->base.s_fence->base); + + job->owner = owner; + amd_sched_entity_push_job(&job->base); + + return 0; +} + +static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) +{ + struct amdgpu_job *job = to_amdgpu_job(sched_job); + struct amdgpu_vm *vm = job->ibs->vm; + + struct fence *fence = amdgpu_sync_get_fence(&job->sync); + + if (fence == NULL && vm && !job->ibs->grabbed_vmid) { + struct amdgpu_ring *ring = job->ring; + int r; + + r = amdgpu_vm_grab_id(vm, ring, &job->sync, + &job->base.s_fence->base); + if (r) + DRM_ERROR("Error getting VM ID (%d)\n", r); + else + job->ibs->grabbed_vmid = true; + + fence = amdgpu_sync_get_fence(&job->sync); + } + + return fence; +} + +static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) +{ + struct fence *fence = NULL; + struct amdgpu_job *job; + int r; + + if (!sched_job) { + DRM_ERROR("job is null\n"); + return NULL; + } + job = to_amdgpu_job(sched_job); + + r = amdgpu_sync_wait(&job->sync); + if (r) { + DRM_ERROR("failed to sync wait (%d)\n", r); + return NULL; + } + + trace_amdgpu_sched_run_job(job); + r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->owner, + job->sync.last_vm_update, &fence); + if (r) { + DRM_ERROR("Error scheduling IBs (%d)\n", r); + goto err; + } + +err: + amdgpu_job_free(job); + return fence; +} + +struct amd_sched_backend_ops amdgpu_sched_ops = { + .dependency = amdgpu_job_dependency, + .run_job = amdgpu_job_run, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index e23843f4d877..7805a8706af7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -447,8 +447,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.max_memory_clock = adev->pm.default_mclk * 10; } dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; - dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * - adev->gfx.config.max_shader_engines; + dev_info.num_rb_pipes = adev->gfx.config.num_rbs; 
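amdgpu_job_alloc() above sizes one allocation for the job header plus its IB array and points ibs at the memory immediately following the header ((*job)->ibs = (void *)&(*job)[1]), so a single kfree() in amdgpu_job_free() releases everything. A stand-alone sketch of that trailing-array layout, with illustrative struct contents:

#include <stdio.h>
#include <stdlib.h>

struct ib  { unsigned length_dw; };
struct job { unsigned num_ibs; struct ib *ibs; };

static struct job *job_alloc(unsigned num_ibs)
{
	struct job *job = calloc(1, sizeof(*job) + num_ibs * sizeof(struct ib));

	if (!job)
		return NULL;
	job->ibs = (struct ib *)&job[1];	/* the IB array lives right after the header */
	job->num_ibs = num_ibs;
	return job;
}

int main(void)
{
	struct job *job = job_alloc(4);

	if (!job)
		return 1;
	job->ibs[3].length_dw = 256;
	printf("job with %u IBs, last IB %u dwords\n", job->num_ibs, job->ibs[3].length_dw);
	free(job);				/* one free() releases header and IBs together */
	return 0;
}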
dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; dev_info._pad = 0; dev_info.ids_flags = 0; @@ -727,6 +726,12 @@ int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe, /* Get associated drm_crtc: */ crtc = &adev->mode_info.crtcs[pipe]->base; + if (!crtc) { + /* This can occur on driver load if some component fails to + * initialize completely and driver is unloaded */ + DRM_ERROR("Uninitialized crtc %d\n", pipe); + return -EINVAL; + } /* Helper routine in DRM core does all the work: */ return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index b1969f2b2038..d7ec9bd6755f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -48,8 +48,7 @@ struct amdgpu_mn { /* protected by adev->mn_lock */ struct hlist_node node; - /* objects protected by lock */ - struct mutex lock; + /* objects protected by mm->mmap_sem */ struct rb_root objects; }; @@ -73,21 +72,19 @@ static void amdgpu_mn_destroy(struct work_struct *work) struct amdgpu_bo *bo, *next_bo; mutex_lock(&adev->mn_lock); - mutex_lock(&rmn->lock); + down_write(&rmn->mm->mmap_sem); hash_del(&rmn->node); rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects, it.rb) { - - interval_tree_remove(&node->it, &rmn->objects); list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) { bo->mn = NULL; list_del_init(&bo->mn_list); } kfree(node); } - mutex_unlock(&rmn->lock); + up_write(&rmn->mm->mmap_sem); mutex_unlock(&adev->mn_lock); - mmu_notifier_unregister(&rmn->mn, rmn->mm); + mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm); kfree(rmn); } @@ -129,8 +126,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, /* notification is exclusive, but interval is inclusive */ end -= 1; - mutex_lock(&rmn->lock); - it = interval_tree_iter_first(&rmn->objects, start, end); while (it) { struct amdgpu_mn_node *node; @@ -142,7 +137,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, list_for_each_entry(bo, &node->bos, mn_list) { - if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) + if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, + end)) continue; r = amdgpu_bo_reserve(bo, true); @@ -164,8 +160,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, amdgpu_bo_unreserve(bo); } } - - mutex_unlock(&rmn->lock); } static const struct mmu_notifier_ops amdgpu_mn_ops = { @@ -186,8 +180,8 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) struct amdgpu_mn *rmn; int r; - down_write(&mm->mmap_sem); mutex_lock(&adev->mn_lock); + down_write(&mm->mmap_sem); hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm) if (rmn->mm == mm) @@ -202,7 +196,6 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) rmn->adev = adev; rmn->mm = mm; rmn->mn.ops = &amdgpu_mn_ops; - mutex_init(&rmn->lock); rmn->objects = RB_ROOT; r = __mmu_notifier_register(&rmn->mn, mm); @@ -212,14 +205,14 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm); release_locks: - mutex_unlock(&adev->mn_lock); up_write(&mm->mmap_sem); + mutex_unlock(&adev->mn_lock); return rmn; free_rmn: - mutex_unlock(&adev->mn_lock); up_write(&mm->mmap_sem); + mutex_unlock(&adev->mn_lock); kfree(rmn); return ERR_PTR(r); @@ -249,7 +242,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) INIT_LIST_HEAD(&bos); - 
mutex_lock(&rmn->lock); + down_write(&rmn->mm->mmap_sem); while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) { kfree(node); @@ -263,7 +256,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) if (!node) { node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL); if (!node) { - mutex_unlock(&rmn->lock); + up_write(&rmn->mm->mmap_sem); return -ENOMEM; } } @@ -278,7 +271,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) interval_tree_insert(&node->it, &rmn->objects); - mutex_unlock(&rmn->lock); + up_write(&rmn->mm->mmap_sem); return 0; } @@ -297,13 +290,15 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) struct list_head *head; mutex_lock(&adev->mn_lock); + rmn = bo->mn; if (rmn == NULL) { mutex_unlock(&adev->mn_lock); return; } - mutex_lock(&rmn->lock); + down_write(&rmn->mm->mmap_sem); + /* save the next list entry for later */ head = bo->mn_list.next; @@ -317,6 +312,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) kfree(node); } - mutex_unlock(&rmn->lock); + up_write(&rmn->mm->mmap_sem); mutex_unlock(&adev->mn_lock); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index fdc1be8550da..8d432e6901af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -390,7 +390,6 @@ struct amdgpu_crtc { struct drm_display_mode native_mode; u32 pll_id; /* page flipping */ - struct workqueue_struct *pflip_queue; struct amdgpu_flip_work *pflip_works; enum amdgpu_flip_status pflip_status; int deferred_flip_completion; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index b8fbbd7699e4..9a025a77958d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -97,9 +97,6 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL); - mutex_lock(&bo->adev->gem.mutex); - list_del_init(&bo->list); - mutex_unlock(&bo->adev->gem.mutex); drm_gem_object_release(&bo->gem_base); amdgpu_bo_unref(&bo->parent); kfree(bo->metadata); @@ -254,12 +251,15 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, bo->adev = adev; INIT_LIST_HEAD(&bo->list); INIT_LIST_HEAD(&bo->va); - bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT | - AMDGPU_GEM_DOMAIN_CPU | - AMDGPU_GEM_DOMAIN_GDS | - AMDGPU_GEM_DOMAIN_GWS | - AMDGPU_GEM_DOMAIN_OA); + bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU | + AMDGPU_GEM_DOMAIN_GDS | + AMDGPU_GEM_DOMAIN_GWS | + AMDGPU_GEM_DOMAIN_OA); + bo->allowed_domains = bo->prefered_domains; + if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) + bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; bo->flags = flags; @@ -367,7 +367,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, int r, i; unsigned fpfn, lpfn; - if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm)) + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) return -EPERM; if (WARN_ON_ONCE(min_offset > max_offset)) @@ -470,26 +470,6 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev) return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM); } -void amdgpu_bo_force_delete(struct amdgpu_device *adev) -{ - struct amdgpu_bo *bo, *n; - - if (list_empty(&adev->gem.objects)) { - return; - } - dev_err(adev->dev, "Userspace still has active objects !\n"); - list_for_each_entry_safe(bo, n, &adev->gem.objects, list) { - dev_err(adev->dev, "%p %p %lu 
%lu force free\n", - &bo->gem_base, bo, (unsigned long)bo->gem_base.size, - *((unsigned long *)&bo->gem_base.refcount)); - mutex_lock(&bo->adev->gem.mutex); - list_del_init(&bo->list); - mutex_unlock(&bo->adev->gem.mutex); - /* this should unref the ttm bo */ - drm_gem_object_unreference_unlocked(&bo->gem_base); - } -} - int amdgpu_bo_init(struct amdgpu_device *adev) { /* Add an MTRR for the VRAM */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 5107fb291bdb..acc08018c6cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -149,7 +149,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr); int amdgpu_bo_unpin(struct amdgpu_bo *bo); int amdgpu_bo_evict_vram(struct amdgpu_device *adev); -void amdgpu_bo_force_delete(struct amdgpu_device *adev); int amdgpu_bo_init(struct amdgpu_device *adev); void amdgpu_bo_fini(struct amdgpu_device *adev); int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 7d8d84eaea4a..d77b2bdbe800 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -119,7 +119,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, level = amdgpu_dpm_get_performance_level(adev); return snprintf(buf, PAGE_SIZE, "%s\n", (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : - (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); + (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : + (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : + (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : "unknown"); } else { enum amdgpu_dpm_forced_level level; @@ -146,6 +148,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, level = AMDGPU_DPM_FORCED_LEVEL_HIGH; } else if (strncmp("auto", buf, strlen("auto")) == 0) { level = AMDGPU_DPM_FORCED_LEVEL_AUTO; + } else if (strncmp("manual", buf, strlen("manual")) == 0) { + level = AMDGPU_DPM_FORCED_LEVEL_MANUAL; } else { count = -EINVAL; goto fail; @@ -172,10 +176,293 @@ fail: return count; } +static ssize_t amdgpu_get_pp_num_states(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + struct pp_states_info data; + int i, buf_len; + + if (adev->pp_enabled) + amdgpu_dpm_get_pp_num_states(adev, &data); + + buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); + for (i = 0; i < data.nums; i++) + buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, + (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : + (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : + (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : + (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? 
"performance" : "default"); + + return buf_len; +} + +static ssize_t amdgpu_get_pp_cur_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + struct pp_states_info data; + enum amd_pm_state_type pm = 0; + int i = 0; + + if (adev->pp_enabled) { + + pm = amdgpu_dpm_get_current_power_state(adev); + amdgpu_dpm_get_pp_num_states(adev, &data); + + for (i = 0; i < data.nums; i++) { + if (pm == data.states[i]) + break; + } + + if (i == data.nums) + i = -EINVAL; + } + + return snprintf(buf, PAGE_SIZE, "%d\n", i); +} + +static ssize_t amdgpu_get_pp_force_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + struct pp_states_info data; + enum amd_pm_state_type pm = 0; + int i; + + if (adev->pp_force_state_enabled && adev->pp_enabled) { + pm = amdgpu_dpm_get_current_power_state(adev); + amdgpu_dpm_get_pp_num_states(adev, &data); + + for (i = 0; i < data.nums; i++) { + if (pm == data.states[i]) + break; + } + + if (i == data.nums) + i = -EINVAL; + + return snprintf(buf, PAGE_SIZE, "%d\n", i); + + } else + return snprintf(buf, PAGE_SIZE, "\n"); +} + +static ssize_t amdgpu_set_pp_force_state(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + enum amd_pm_state_type state = 0; + long idx; + int ret; + + if (strlen(buf) == 1) + adev->pp_force_state_enabled = false; + else { + ret = kstrtol(buf, 0, &idx); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) { + struct pp_states_info data; + amdgpu_dpm_get_pp_num_states(adev, &data); + state = data.states[idx]; + /* only set user selected power states */ + if (state != POWER_STATE_TYPE_INTERNAL_BOOT && + state != POWER_STATE_TYPE_DEFAULT) { + amdgpu_dpm_dispatch_task(adev, + AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); + adev->pp_force_state_enabled = true; + } + } + } +fail: + return count; +} + +static ssize_t amdgpu_get_pp_table(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + char *table = NULL; + int size, i; + + if (adev->pp_enabled) + size = amdgpu_dpm_get_pp_table(adev, &table); + else + return 0; + + if (size >= PAGE_SIZE) + size = PAGE_SIZE - 1; + + for (i = 0; i < size; i++) { + sprintf(buf + i, "%02x", table[i]); + } + sprintf(buf + i, "\n"); + + return size; +} + +static ssize_t amdgpu_set_pp_table(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + + if (adev->pp_enabled) + amdgpu_dpm_set_pp_table(adev, buf, count); + + return count; +} + +static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + ssize_t size = 0; + + if (adev->pp_enabled) + size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); + + return size; +} + +static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = 
ddev->dev_private; + int ret; + long level; + + ret = kstrtol(buf, 0, &level); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) + amdgpu_dpm_force_clock_level(adev, PP_SCLK, level); +fail: + return count; +} + +static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + ssize_t size = 0; + + if (adev->pp_enabled) + size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); + + return size; +} + +static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + long level; + + ret = kstrtol(buf, 0, &level); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) + amdgpu_dpm_force_clock_level(adev, PP_MCLK, level); +fail: + return count; +} + +static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + ssize_t size = 0; + + if (adev->pp_enabled) + size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); + + return size; +} + +static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + long level; + + ret = kstrtol(buf, 0, &level); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) + amdgpu_dpm_force_clock_level(adev, PP_PCIE, level); +fail: + return count; +} + static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state); static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, amdgpu_get_dpm_forced_performance_level, amdgpu_set_dpm_forced_performance_level); +static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL); +static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL); +static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR, + amdgpu_get_pp_force_state, + amdgpu_set_pp_force_state); +static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR, + amdgpu_get_pp_table, + amdgpu_set_pp_table); +static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR, + amdgpu_get_pp_dpm_sclk, + amdgpu_set_pp_dpm_sclk); +static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, + amdgpu_get_pp_dpm_mclk, + amdgpu_set_pp_dpm_mclk); +static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, + amdgpu_get_pp_dpm_pcie, + amdgpu_set_pp_dpm_pcie); static ssize_t amdgpu_hwmon_show_temp(struct device *dev, struct device_attribute *attr, @@ -623,14 +910,12 @@ force: amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); } - mutex_lock(&adev->ring_lock); - /* update whether vce is active */ ps->vce_active = adev->pm.dpm.vce_active; ret = amdgpu_dpm_pre_set_power_state(adev); if (ret) - goto done; + return; /* update display watermarks based on new power state */ amdgpu_display_bandwidth_update(adev); @@ -667,9 +952,6 @@ force: amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); } } - -done: - mutex_unlock(&adev->ring_lock); } void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) @@ -770,6 +1052,44 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) DRM_ERROR("failed to create device file for dpm state\n"); 
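The pp_dpm_sclk/mclk/pcie store callbacks added here parse a level index with kstrtol() and hand it to amdgpu_dpm_force_clock_level(), and the new pp_* DEVICE_ATTR entries expose that through sysfs. A minimal userspace sketch of how the new clock-level file could be exercised follows; the /sys/class/drm/card0/device path, the printed level format, and the need to switch power_dpm_force_performance_level to "manual" first are assumptions about the usual DRM sysfs layout, not something defined by this hunk.

        /* Userspace sketch (assumed sysfs path, adjust cardN as needed). */
        #include <stdio.h>

        int main(void)
        {
                const char *path = "/sys/class/drm/card0/device/pp_dpm_sclk";
                char line[128];
                FILE *f = fopen(path, "r");

                if (!f) {
                        perror("pp_dpm_sclk");
                        return 1;
                }
                /* Print whatever levels the powerplay backend reports. */
                while (fgets(line, sizeof(line), f))
                        fputs(line, stdout);
                fclose(f);

                /* Forcing a level needs write access and typically the
                 * "manual" performance level selected beforehand. */
                f = fopen(path, "w");
                if (f) {
                        fputs("0\n", f);        /* request the lowest sclk level */
                        fclose(f);
                }
                return 0;
        }
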
return ret; } + + if (adev->pp_enabled) { + ret = device_create_file(adev->dev, &dev_attr_pp_num_states); + if (ret) { + DRM_ERROR("failed to create device file pp_num_states\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_cur_state); + if (ret) { + DRM_ERROR("failed to create device file pp_cur_state\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_force_state); + if (ret) { + DRM_ERROR("failed to create device file pp_force_state\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_table); + if (ret) { + DRM_ERROR("failed to create device file pp_table\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_sclk\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_mclk\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_pcie\n"); + return ret; + } + } ret = amdgpu_debugfs_pm_init(adev); if (ret) { DRM_ERROR("Failed to register debugfs file for dpm!\n"); @@ -787,6 +1107,15 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) hwmon_device_unregister(adev->pm.int_hwmon_dev); device_remove_file(adev->dev, &dev_attr_power_dpm_state); device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); + if (adev->pp_enabled) { + device_remove_file(adev->dev, &dev_attr_pp_num_states); + device_remove_file(adev->dev, &dev_attr_pp_cur_state); + device_remove_file(adev->dev, &dev_attr_pp_force_state); + device_remove_file(adev->dev, &dev_attr_pp_table); + device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); + device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); + device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); + } } void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) @@ -802,13 +1131,11 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) int i = 0; amdgpu_display_bandwidth_update(adev); - mutex_lock(&adev->ring_lock); - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - if (ring && ring->ready) - amdgpu_fence_wait_empty(ring); - } - mutex_unlock(&adev->ring_lock); + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (ring && ring->ready) + amdgpu_fence_wait_empty(ring); + } amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 59f735a933a9..be6388f73ba2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -73,10 +73,6 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev, if (ret) return ERR_PTR(ret); - mutex_lock(&adev->gem.mutex); - list_add_tail(&bo->list, &adev->gem.objects); - mutex_unlock(&adev->gem.mutex); - return &bo->gem_base; } @@ -121,7 +117,7 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, { struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); - if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm)) + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) return ERR_PTR(-EPERM); return drm_gem_prime_export(dev, gobj, flags); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index d1f234dd2126..56c07e3fdb33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -49,28 +49,6 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring); /** - * amdgpu_ring_free_size - update the free size - * - * @adev: amdgpu_device pointer - * @ring: amdgpu_ring structure holding ring information - * - * Update the free dw slots in the ring buffer (all asics). - */ -void amdgpu_ring_free_size(struct amdgpu_ring *ring) -{ - uint32_t rptr = amdgpu_ring_get_rptr(ring); - - /* This works because ring_size is a power of 2 */ - ring->ring_free_dw = rptr + (ring->ring_size / 4); - ring->ring_free_dw -= ring->wptr; - ring->ring_free_dw &= ring->ptr_mask; - if (!ring->ring_free_dw) { - /* this is an empty ring */ - ring->ring_free_dw = ring->ring_size / 4; - } -} - -/** * amdgpu_ring_alloc - allocate space on the ring buffer * * @adev: amdgpu_device pointer @@ -82,50 +60,18 @@ void amdgpu_ring_free_size(struct amdgpu_ring *ring) */ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) { - int r; - - /* make sure we aren't trying to allocate more space than there is on the ring */ - if (ndw > (ring->ring_size / 4)) - return -ENOMEM; /* Align requested size with padding so unlock_commit can * pad safely */ - amdgpu_ring_free_size(ring); ndw = (ndw + ring->align_mask) & ~ring->align_mask; - while (ndw > (ring->ring_free_dw - 1)) { - amdgpu_ring_free_size(ring); - if (ndw < ring->ring_free_dw) { - break; - } - r = amdgpu_fence_wait_next(ring); - if (r) - return r; - } - ring->count_dw = ndw; - ring->wptr_old = ring->wptr; - return 0; -} -/** - * amdgpu_ring_lock - lock the ring and allocate space on it - * - * @adev: amdgpu_device pointer - * @ring: amdgpu_ring structure holding ring information - * @ndw: number of dwords to allocate in the ring buffer - * - * Lock the ring and allocate @ndw dwords in the ring buffer - * (all asics). - * Returns 0 on success, error on failure. - */ -int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw) -{ - int r; + /* Make sure we aren't trying to allocate more space + * than the maximum for one submission + */ + if (WARN_ON_ONCE(ndw > ring->max_dw)) + return -ENOMEM; - mutex_lock(ring->ring_lock); - r = amdgpu_ring_alloc(ring, ndw); - if (r) { - mutex_unlock(ring->ring_lock); - return r; - } + ring->count_dw = ndw; + ring->wptr_old = ring->wptr; return 0; } @@ -144,6 +90,19 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) amdgpu_ring_write(ring, ring->nop); } +/** amdgpu_ring_generic_pad_ib - pad IB with NOP packets + * + * @ring: amdgpu_ring structure holding ring information + * @ib: IB to add NOP packets to + * + * This is the generic pad_ib function for rings except SDMA + */ +void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) +{ + while (ib->length_dw & ring->align_mask) + ib->ptr[ib->length_dw++] = ring->nop; +} + /** * amdgpu_ring_commit - tell the GPU to execute the new * commands on the ring buffer @@ -168,20 +127,6 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) } /** - * amdgpu_ring_unlock_commit - tell the GPU to execute the new - * commands on the ring buffer and unlock it - * - * @ring: amdgpu_ring structure holding ring information - * - * Call amdgpu_ring_commit() then unlock the ring (all asics). 
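These ring hunks drop the per-ring mutex: amdgpu_ring_lock() and amdgpu_ring_unlock_commit() go away, callers switch to the plain amdgpu_ring_alloc()/amdgpu_ring_commit() pair, and the size check is now done against ring->max_dw instead of recomputing free space. A short usage sketch, assuming amdgpu driver-internal context (the helper name example_emit_nops is invented for illustration):

        /* Assumes #include "amdgpu.h" and the driver's internal API. */
        static int example_emit_nops(struct amdgpu_ring *ring, unsigned count)
        {
                unsigned i;
                int r;

                r = amdgpu_ring_alloc(ring, count);  /* was: amdgpu_ring_lock(ring, count) */
                if (r)
                        return r;

                for (i = 0; i < count; i++)
                        amdgpu_ring_write(ring, ring->nop);

                amdgpu_ring_commit(ring);            /* was: amdgpu_ring_unlock_commit(ring) */
                return 0;
        }

Any serialization that the removed mutex used to provide is expected to come from the scheduler or the caller, which is why the failure path can still rely on amdgpu_ring_undo() alone.
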
- */ -void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring) -{ - amdgpu_ring_commit(ring); - mutex_unlock(ring->ring_lock); -} - -/** * amdgpu_ring_undo - reset the wptr * * @ring: amdgpu_ring structure holding ring information @@ -194,19 +139,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) } /** - * amdgpu_ring_unlock_undo - reset the wptr and unlock the ring - * - * @ring: amdgpu_ring structure holding ring information - * - * Call amdgpu_ring_undo() then unlock the ring (all asics). - */ -void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring) -{ - amdgpu_ring_undo(ring); - mutex_unlock(ring->ring_lock); -} - -/** * amdgpu_ring_backup - Back up the content of a ring * * @ring: the ring we want to back up @@ -218,43 +150,32 @@ unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, { unsigned size, ptr, i; - /* just in case lock the ring */ - mutex_lock(ring->ring_lock); *data = NULL; - if (ring->ring_obj == NULL) { - mutex_unlock(ring->ring_lock); + if (ring->ring_obj == NULL) return 0; - } /* it doesn't make sense to save anything if all fences are signaled */ - if (!amdgpu_fence_count_emitted(ring)) { - mutex_unlock(ring->ring_lock); + if (!amdgpu_fence_count_emitted(ring)) return 0; - } ptr = le32_to_cpu(*ring->next_rptr_cpu_addr); size = ring->wptr + (ring->ring_size / 4); size -= ptr; size &= ring->ptr_mask; - if (size == 0) { - mutex_unlock(ring->ring_lock); + if (size == 0) return 0; - } /* and then save the content of the ring */ *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); - if (!*data) { - mutex_unlock(ring->ring_lock); + if (!*data) return 0; - } for (i = 0; i < size; ++i) { (*data)[i] = ring->ring[ptr++]; ptr &= ring->ptr_mask; } - mutex_unlock(ring->ring_lock); return size; } @@ -276,7 +197,7 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring, return 0; /* restore the saved ring content */ - r = amdgpu_ring_lock(ring, size); + r = amdgpu_ring_alloc(ring, size); if (r) return r; @@ -284,7 +205,7 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring, amdgpu_ring_write(ring, data[i]); } - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); kfree(data); return 0; } @@ -352,7 +273,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } - ring->ring_lock = &adev->ring_lock; /* Align ring size */ rb_bufsz = order_base_2(ring_size / 8); ring_size = (1 << (rb_bufsz + 1)) * 4; @@ -389,7 +309,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } } ring->ptr_mask = (ring->ring_size / 4) - 1; - ring->ring_free_dw = ring->ring_size / 4; + ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4, + amdgpu_sched_hw_submission); if (amdgpu_debugfs_ring_init(adev, ring)) { DRM_ERROR("Failed to register debugfs file for rings !\n"); @@ -410,15 +331,10 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) int r; struct amdgpu_bo *ring_obj; - if (ring->ring_lock == NULL) - return; - - mutex_lock(ring->ring_lock); ring_obj = ring->ring_obj; ring->ready = false; ring->ring = NULL; ring->ring_obj = NULL; - mutex_unlock(ring->ring_lock); amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_wb_free(ring->adev, ring->rptr_offs); @@ -474,29 +390,18 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data) struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset); uint32_t rptr, wptr, rptr_next; - unsigned count, i, j; - - amdgpu_ring_free_size(ring); - count = (ring->ring_size / 4) - ring->ring_free_dw; + unsigned i; wptr = amdgpu_ring_get_wptr(ring); - seq_printf(m, "wptr: 0x%08x [%5d]\n", - wptr, wptr); 
+ seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr); rptr = amdgpu_ring_get_rptr(ring); - seq_printf(m, "rptr: 0x%08x [%5d]\n", - rptr, rptr); - rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr); + seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr); + seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr); - seq_printf(m, "last semaphore signal addr : 0x%016llx\n", - ring->last_semaphore_signal_addr); - seq_printf(m, "last semaphore wait addr : 0x%016llx\n", - ring->last_semaphore_wait_addr); - seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); - seq_printf(m, "%u dwords in ring\n", count); if (!ring->ready) return 0; @@ -505,11 +410,20 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data) * packet that is the root issue */ i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; - for (j = 0; j <= (count + 32); j++) { + while (i != rptr) { + seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); + if (i == rptr) + seq_puts(m, " *"); + if (i == rptr_next) + seq_puts(m, " #"); + seq_puts(m, "\n"); + i = (i + 1) & ring->ptr_mask; + } + while (i != wptr) { seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); - if (rptr == i) + if (i == rptr) seq_puts(m, " *"); - if (rptr_next == i) + if (i == rptr_next) seq_puts(m, " #"); seq_puts(m, "\n"); i = (i + 1) & ring->ptr_mask; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 8b88edb0434b..7d8f8f1e3f7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -321,8 +321,11 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, int i, r; signed long t; - BUG_ON(align > sa_manager->align); - BUG_ON(size > sa_manager->size); + if (WARN_ON_ONCE(align > sa_manager->align)) + return -EINVAL; + + if (WARN_ON_ONCE(size > sa_manager->size)) + return -EINVAL; *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL); if ((*sa_bo) == NULL) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c deleted file mode 100644 index 438c05254695..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * - */ -#include <linux/kthread.h> -#include <linux/wait.h> -#include <linux/sched.h> -#include <drm/drmP.h> -#include "amdgpu.h" -#include "amdgpu_trace.h" - -static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) -{ - struct amdgpu_job *job = to_amdgpu_job(sched_job); - return amdgpu_sync_get_fence(&job->ibs->sync); -} - -static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) -{ - struct amdgpu_fence *fence = NULL; - struct amdgpu_job *job; - int r; - - if (!sched_job) { - DRM_ERROR("job is null\n"); - return NULL; - } - job = to_amdgpu_job(sched_job); - trace_amdgpu_sched_run_job(job); - r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner); - if (r) { - DRM_ERROR("Error scheduling IBs (%d)\n", r); - goto err; - } - - fence = job->ibs[job->num_ibs - 1].fence; - fence_get(&fence->base); - -err: - if (job->free_job) - job->free_job(job); - - kfree(job); - return fence ? &fence->base : NULL; -} - -struct amd_sched_backend_ops amdgpu_sched_ops = { - .dependency = amdgpu_sched_dependency, - .run_job = amdgpu_sched_run_job, -}; - -int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_ib *ibs, - unsigned num_ibs, - int (*free_job)(struct amdgpu_job *), - void *owner, - struct fence **f) -{ - int r = 0; - if (amdgpu_enable_scheduler) { - struct amdgpu_job *job = - kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); - if (!job) - return -ENOMEM; - job->base.sched = &ring->sched; - job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; - job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); - if (!job->base.s_fence) { - kfree(job); - return -ENOMEM; - } - *f = fence_get(&job->base.s_fence->base); - - job->adev = adev; - job->ibs = ibs; - job->num_ibs = num_ibs; - job->owner = owner; - job->free_job = free_job; - amd_sched_entity_push_job(&job->base); - } else { - r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); - if (r) - return r; - *f = fence_get(&ibs[num_ibs - 1].fence->base); - } - - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c deleted file mode 100644 index 1caaf201b708..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2011 Christian König. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. 
- * - */ -/* - * Authors: - * Christian König <deathsimple@vodafone.de> - */ -#include <drm/drmP.h> -#include "amdgpu.h" -#include "amdgpu_trace.h" - -int amdgpu_semaphore_create(struct amdgpu_device *adev, - struct amdgpu_semaphore **semaphore) -{ - int r; - - *semaphore = kmalloc(sizeof(struct amdgpu_semaphore), GFP_KERNEL); - if (*semaphore == NULL) { - return -ENOMEM; - } - r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, - &(*semaphore)->sa_bo, 8, 8); - if (r) { - kfree(*semaphore); - *semaphore = NULL; - return r; - } - (*semaphore)->waiters = 0; - (*semaphore)->gpu_addr = amdgpu_sa_bo_gpu_addr((*semaphore)->sa_bo); - - *((uint64_t *)amdgpu_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; - - return 0; -} - -bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore) -{ - trace_amdgpu_semaphore_signale(ring->idx, semaphore); - - if (amdgpu_ring_emit_semaphore(ring, semaphore, false)) { - --semaphore->waiters; - - /* for debugging lockup only, used by sysfs debug files */ - ring->last_semaphore_signal_addr = semaphore->gpu_addr; - return true; - } - return false; -} - -bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore) -{ - trace_amdgpu_semaphore_wait(ring->idx, semaphore); - - if (amdgpu_ring_emit_semaphore(ring, semaphore, true)) { - ++semaphore->waiters; - - /* for debugging lockup only, used by sysfs debug files */ - ring->last_semaphore_wait_addr = semaphore->gpu_addr; - return true; - } - return false; -} - -void amdgpu_semaphore_free(struct amdgpu_device *adev, - struct amdgpu_semaphore **semaphore, - struct fence *fence) -{ - if (semaphore == NULL || *semaphore == NULL) { - return; - } - if ((*semaphore)->waiters > 0) { - dev_err(adev->dev, "semaphore %p has more waiters than signalers," - " hardware lockup imminent!\n", *semaphore); - } - amdgpu_sa_bo_free(adev, &(*semaphore)->sa_bo, fence); - kfree(*semaphore); - *semaphore = NULL; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 181ce39ef5e5..c15be00de904 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -46,14 +46,6 @@ struct amdgpu_sync_entry { */ void amdgpu_sync_create(struct amdgpu_sync *sync) { - unsigned i; - - for (i = 0; i < AMDGPU_NUM_SYNCS; ++i) - sync->semaphores[i] = NULL; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) - sync->sync_to[i] = NULL; - hash_init(sync->fences); sync->last_vm_update = NULL; } @@ -107,7 +99,6 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct fence *f) { struct amdgpu_sync_entry *e; - struct amdgpu_fence *fence; if (!f) return 0; @@ -116,27 +107,20 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) amdgpu_sync_keep_later(&sync->last_vm_update, f); - fence = to_amdgpu_fence(f); - if (!fence || fence->ring->adev != adev) { - hash_for_each_possible(sync->fences, e, node, f->context) { - if (unlikely(e->fence->context != f->context)) - continue; - - amdgpu_sync_keep_later(&e->fence, f); - return 0; - } - - e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL); - if (!e) - return -ENOMEM; + hash_for_each_possible(sync->fences, e, node, f->context) { + if (unlikely(e->fence->context != f->context)) + continue; - hash_add(sync->fences, &e->node, f->context); - e->fence = fence_get(f); + amdgpu_sync_keep_later(&e->fence, f); return 0; } - amdgpu_sync_keep_later(&sync->sync_to[fence->ring->idx], f); + e = 
kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL); + if (!e) + return -ENOMEM; + hash_add(sync->fences, &e->node, f->context); + e->fence = fence_get(f); return 0; } @@ -153,13 +137,13 @@ static void *amdgpu_sync_get_owner(struct fence *f) } /** - * amdgpu_sync_resv - use the semaphores to sync to a reservation object + * amdgpu_sync_resv - sync to a reservation object * * @sync: sync object to add fences from reservation object to * @resv: reservation object with embedded fence * @shared: true if we should only sync to the exclusive fence * - * Sync to the fence using the semaphore objects + * Sync to the fence */ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, @@ -250,123 +234,17 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync) kfree(e); } - if (amdgpu_enable_semaphores) - return 0; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct fence *fence = sync->sync_to[i]; - if (!fence) - continue; - - r = fence_wait(fence, false); - if (r) - return r; - } - - return 0; -} - -/** - * amdgpu_sync_rings - sync ring to all registered fences - * - * @sync: sync object to use - * @ring: ring that needs sync - * - * Ensure that all registered fences are signaled before letting - * the ring continue. The caller must hold the ring lock. - */ -int amdgpu_sync_rings(struct amdgpu_sync *sync, - struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - unsigned count = 0; - int i, r; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_ring *other = adev->rings[i]; - struct amdgpu_semaphore *semaphore; - struct amdgpu_fence *fence; - - if (!sync->sync_to[i]) - continue; - - fence = to_amdgpu_fence(sync->sync_to[i]); - - /* check if we really need to sync */ - if (!amdgpu_enable_scheduler && - !amdgpu_fence_need_sync(fence, ring)) - continue; - - /* prevent GPU deadlocks */ - if (!other->ready) { - dev_err(adev->dev, "Syncing to a disabled ring!"); - return -EINVAL; - } - - if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) { - r = fence_wait(sync->sync_to[i], true); - if (r) - return r; - continue; - } - - if (count >= AMDGPU_NUM_SYNCS) { - /* not enough room, wait manually */ - r = fence_wait(&fence->base, false); - if (r) - return r; - continue; - } - r = amdgpu_semaphore_create(adev, &semaphore); - if (r) - return r; - - sync->semaphores[count++] = semaphore; - - /* allocate enough space for sync command */ - r = amdgpu_ring_alloc(other, 16); - if (r) - return r; - - /* emit the signal semaphore */ - if (!amdgpu_semaphore_emit_signal(other, semaphore)) { - /* signaling wasn't successful wait manually */ - amdgpu_ring_undo(other); - r = fence_wait(&fence->base, false); - if (r) - return r; - continue; - } - - /* we assume caller has already allocated space on waiters ring */ - if (!amdgpu_semaphore_emit_wait(ring, semaphore)) { - /* waiting wasn't successful wait manually */ - amdgpu_ring_undo(other); - r = fence_wait(&fence->base, false); - if (r) - return r; - continue; - } - - amdgpu_ring_commit(other); - amdgpu_fence_note_sync(fence, ring); - } - return 0; } /** * amdgpu_sync_free - free the sync object * - * @adev: amdgpu_device pointer * @sync: sync object to use - * @fence: fence to use for the free * - * Free the sync object by freeing all semaphores in it. + * Free the sync object. 
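With the semaphore path gone, amdgpu_sync_fence() now files every fence into the hash keyed by the fence context and keeps only the most recent fence per context. The standalone toy program below models just that rule; struct entry and sync_keep_later() are illustrative stand-ins rather than driver code, and a real table would also add an entry for an unknown context the way the patch does.

        /* Toy model of "remember only the newest fence per context". */
        #include <stdio.h>

        struct entry { unsigned ctx; unsigned seq; };

        static void sync_keep_later(struct entry *table, unsigned n,
                                    unsigned ctx, unsigned seq)
        {
                unsigned i;

                for (i = 0; i < n; i++) {
                        if (table[i].ctx != ctx)
                                continue;
                        if (seq > table[i].seq) /* newer fence on the same context wins */
                                table[i].seq = seq;
                        return;
                }
                /* A real implementation would allocate a new entry here. */
        }

        int main(void)
        {
                struct entry table[2] = { { 1, 10 }, { 2, 5 } };

                sync_keep_later(table, 2, 1, 12);  /* replaces seq 10 */
                sync_keep_later(table, 2, 1, 11);  /* older than 12, ignored */

                printf("ctx 1 waits for seq %u, ctx 2 for seq %u\n",
                       table[0].seq, table[1].seq);
                return 0;
        }

This works because fences on a single context signal in order, so waiting for the latest one implies all earlier ones.
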
*/ -void amdgpu_sync_free(struct amdgpu_device *adev, - struct amdgpu_sync *sync, - struct fence *fence) +void amdgpu_sync_free(struct amdgpu_sync *sync) { struct amdgpu_sync_entry *e; struct hlist_node *tmp; @@ -378,11 +256,5 @@ void amdgpu_sync_free(struct amdgpu_device *adev, kfree(e); } - for (i = 0; i < AMDGPU_NUM_SYNCS; ++i) - amdgpu_semaphore_free(adev, &sync->semaphores[i], fence); - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) - fence_put(sync->sync_to[i]); - fence_put(sync->last_vm_update); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 4865615e9c06..05a53f4fc334 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -238,144 +238,10 @@ void amdgpu_test_moves(struct amdgpu_device *adev) amdgpu_do_test_moves(adev); } -static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct fence **fence) -{ - uint32_t handle = ring->idx ^ 0xdeafbeef; - int r; - - if (ring == &adev->uvd.ring) { - r = amdgpu_uvd_get_create_msg(ring, handle, NULL); - if (r) { - DRM_ERROR("Failed to get dummy create msg\n"); - return r; - } - - r = amdgpu_uvd_get_destroy_msg(ring, handle, fence); - if (r) { - DRM_ERROR("Failed to get dummy destroy msg\n"); - return r; - } - - } else if (ring == &adev->vce.ring[0] || - ring == &adev->vce.ring[1]) { - r = amdgpu_vce_get_create_msg(ring, handle, NULL); - if (r) { - DRM_ERROR("Failed to get dummy create msg\n"); - return r; - } - - r = amdgpu_vce_get_destroy_msg(ring, handle, fence); - if (r) { - DRM_ERROR("Failed to get dummy destroy msg\n"); - return r; - } - } else { - struct amdgpu_fence *a_fence = NULL; - r = amdgpu_ring_lock(ring, 64); - if (r) { - DRM_ERROR("Failed to lock ring A %d\n", ring->idx); - return r; - } - amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence); - amdgpu_ring_unlock_commit(ring); - *fence = &a_fence->base; - } - return 0; -} - void amdgpu_test_ring_sync(struct amdgpu_device *adev, struct amdgpu_ring *ringA, struct amdgpu_ring *ringB) { - struct fence *fence1 = NULL, *fence2 = NULL; - struct amdgpu_semaphore *semaphore = NULL; - int r; - - r = amdgpu_semaphore_create(adev, &semaphore); - if (r) { - DRM_ERROR("Failed to create semaphore\n"); - goto out_cleanup; - } - - r = amdgpu_ring_lock(ringA, 64); - if (r) { - DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); - goto out_cleanup; - } - amdgpu_semaphore_emit_wait(ringA, semaphore); - amdgpu_ring_unlock_commit(ringA); - - r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1); - if (r) - goto out_cleanup; - - r = amdgpu_ring_lock(ringA, 64); - if (r) { - DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); - goto out_cleanup; - } - amdgpu_semaphore_emit_wait(ringA, semaphore); - amdgpu_ring_unlock_commit(ringA); - - r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2); - if (r) - goto out_cleanup; - - mdelay(1000); - - if (fence_is_signaled(fence1)) { - DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n"); - goto out_cleanup; - } - - r = amdgpu_ring_lock(ringB, 64); - if (r) { - DRM_ERROR("Failed to lock ring B %p\n", ringB); - goto out_cleanup; - } - amdgpu_semaphore_emit_signal(ringB, semaphore); - amdgpu_ring_unlock_commit(ringB); - - r = fence_wait(fence1, false); - if (r) { - DRM_ERROR("Failed to wait for sync fence 1\n"); - goto out_cleanup; - } - - mdelay(1000); - - if (fence_is_signaled(fence2)) { - DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n"); - goto out_cleanup; - } - - r = 
amdgpu_ring_lock(ringB, 64); - if (r) { - DRM_ERROR("Failed to lock ring B %p\n", ringB); - goto out_cleanup; - } - amdgpu_semaphore_emit_signal(ringB, semaphore); - amdgpu_ring_unlock_commit(ringB); - - r = fence_wait(fence2, false); - if (r) { - DRM_ERROR("Failed to wait for sync fence 1\n"); - goto out_cleanup; - } - -out_cleanup: - amdgpu_semaphore_free(adev, &semaphore, NULL); - - if (fence1) - fence_put(fence1); - - if (fence2) - fence_put(fence2); - - if (r) - printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); } static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, @@ -383,109 +249,6 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, struct amdgpu_ring *ringB, struct amdgpu_ring *ringC) { - struct fence *fenceA = NULL, *fenceB = NULL; - struct amdgpu_semaphore *semaphore = NULL; - bool sigA, sigB; - int i, r; - - r = amdgpu_semaphore_create(adev, &semaphore); - if (r) { - DRM_ERROR("Failed to create semaphore\n"); - goto out_cleanup; - } - - r = amdgpu_ring_lock(ringA, 64); - if (r) { - DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); - goto out_cleanup; - } - amdgpu_semaphore_emit_wait(ringA, semaphore); - amdgpu_ring_unlock_commit(ringA); - - r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA); - if (r) - goto out_cleanup; - - r = amdgpu_ring_lock(ringB, 64); - if (r) { - DRM_ERROR("Failed to lock ring B %d\n", ringB->idx); - goto out_cleanup; - } - amdgpu_semaphore_emit_wait(ringB, semaphore); - amdgpu_ring_unlock_commit(ringB); - r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB); - if (r) - goto out_cleanup; - - mdelay(1000); - - if (fence_is_signaled(fenceA)) { - DRM_ERROR("Fence A signaled without waiting for semaphore.\n"); - goto out_cleanup; - } - if (fence_is_signaled(fenceB)) { - DRM_ERROR("Fence B signaled without waiting for semaphore.\n"); - goto out_cleanup; - } - - r = amdgpu_ring_lock(ringC, 64); - if (r) { - DRM_ERROR("Failed to lock ring B %p\n", ringC); - goto out_cleanup; - } - amdgpu_semaphore_emit_signal(ringC, semaphore); - amdgpu_ring_unlock_commit(ringC); - - for (i = 0; i < 30; ++i) { - mdelay(100); - sigA = fence_is_signaled(fenceA); - sigB = fence_is_signaled(fenceB); - if (sigA || sigB) - break; - } - - if (!sigA && !sigB) { - DRM_ERROR("Neither fence A nor B has been signaled\n"); - goto out_cleanup; - } else if (sigA && sigB) { - DRM_ERROR("Both fence A and B has been signaled\n"); - goto out_cleanup; - } - - DRM_INFO("Fence %c was first signaled\n", sigA ? 
'A' : 'B'); - - r = amdgpu_ring_lock(ringC, 64); - if (r) { - DRM_ERROR("Failed to lock ring B %p\n", ringC); - goto out_cleanup; - } - amdgpu_semaphore_emit_signal(ringC, semaphore); - amdgpu_ring_unlock_commit(ringC); - - mdelay(1000); - - r = fence_wait(fenceA, false); - if (r) { - DRM_ERROR("Failed to wait for sync fence A\n"); - goto out_cleanup; - } - r = fence_wait(fenceB, false); - if (r) { - DRM_ERROR("Failed to wait for sync fence B\n"); - goto out_cleanup; - } - -out_cleanup: - amdgpu_semaphore_free(adev, &semaphore, NULL); - - if (fenceA) - fence_put(fenceA); - - if (fenceB) - fence_put(fenceB); - - if (r) - printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); } static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 8f9834ab1bd5..9ca3735c563c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -38,10 +38,10 @@ TRACE_EVENT(amdgpu_cs, TP_fast_assign( __entry->bo_list = p->bo_list; - __entry->ring = p->ibs[i].ring->idx; - __entry->dw = p->ibs[i].length_dw; + __entry->ring = p->job->ring->idx; + __entry->dw = p->job->ibs[i].length_dw; __entry->fences = amdgpu_fence_count_emitted( - p->ibs[i].ring); + p->job->ring); ), TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", __entry->bo_list, __entry->ring, __entry->dw, @@ -65,7 +65,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, __entry->sched_job = &job->base; __entry->ib = job->ibs; __entry->fence = &job->base.s_fence->base; - __entry->ring_name = job->ibs[0].ring->name; + __entry->ring_name = job->ring->name; __entry->num_ibs = job->num_ibs; ), TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", @@ -90,7 +90,7 @@ TRACE_EVENT(amdgpu_sched_run_job, __entry->sched_job = &job->base; __entry->ib = job->ibs; __entry->fence = &job->base.s_fence->base; - __entry->ring_name = job->ibs[0].ring->name; + __entry->ring_name = job->ring->name; __entry->num_ibs = job->num_ibs; ), TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", @@ -100,18 +100,21 @@ TRACE_EVENT(amdgpu_sched_run_job, TRACE_EVENT(amdgpu_vm_grab_id, - TP_PROTO(unsigned vmid, int ring), - TP_ARGS(vmid, ring), + TP_PROTO(struct amdgpu_vm *vm, unsigned vmid, int ring), + TP_ARGS(vm, vmid, ring), TP_STRUCT__entry( + __field(struct amdgpu_vm *, vm) __field(u32, vmid) __field(u32, ring) ), TP_fast_assign( + __entry->vm = vm; __entry->vmid = vmid; __entry->ring = ring; ), - TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring) + TP_printk("vm=%p, id=%u, ring=%u", __entry->vm, __entry->vmid, + __entry->ring) ); TRACE_EVENT(amdgpu_vm_bo_map, @@ -247,42 +250,6 @@ TRACE_EVENT(amdgpu_bo_list_set, TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) ); -DECLARE_EVENT_CLASS(amdgpu_semaphore_request, - - TP_PROTO(int ring, struct amdgpu_semaphore *sem), - - TP_ARGS(ring, sem), - - TP_STRUCT__entry( - __field(int, ring) - __field(signed, waiters) - __field(uint64_t, gpu_addr) - ), - - TP_fast_assign( - __entry->ring = ring; - __entry->waiters = sem->waiters; - __entry->gpu_addr = sem->gpu_addr; - ), - - TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring, - __entry->waiters, __entry->gpu_addr) -); - -DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_signale, - - TP_PROTO(int ring, struct amdgpu_semaphore *sem), - - TP_ARGS(ring, sem) -); - -DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_wait, - - TP_PROTO(int ring, struct 
amdgpu_semaphore *sem), - - TP_ARGS(ring, sem) -); - #endif /* This part must be outside protection */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 55cf05e1c81c..e52fc641edfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -77,6 +77,8 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) static int amdgpu_ttm_global_init(struct amdgpu_device *adev) { struct drm_global_reference *global_ref; + struct amdgpu_ring *ring; + struct amd_sched_rq *rq; int r; adev->mman.mem_global_referenced = false; @@ -106,13 +108,27 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) return r; } + ring = adev->mman.buffer_funcs_ring; + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; + r = amd_sched_entity_init(&ring->sched, &adev->mman.entity, + rq, amdgpu_sched_jobs); + if (r != 0) { + DRM_ERROR("Failed setting up TTM BO move run queue.\n"); + drm_global_item_unref(&adev->mman.mem_global_ref); + drm_global_item_unref(&adev->mman.bo_global_ref.ref); + return r; + } + adev->mman.mem_global_referenced = true; + return 0; } static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) { if (adev->mman.mem_global_referenced) { + amd_sched_entity_fini(adev->mman.entity.sched, + &adev->mman.entity); drm_global_item_unref(&adev->mman.bo_global_ref.ref); drm_global_item_unref(&adev->mman.mem_global_ref); adev->mman.mem_global_referenced = false; @@ -499,9 +515,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) enum dma_data_direction direction = write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; - if (current->mm != gtt->usermm) - return -EPERM; - if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) { /* check that we only pin down anonymous memory to prevent problems with writeback */ @@ -773,14 +786,33 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, return 0; } -bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm) +struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) +{ + struct amdgpu_ttm_tt *gtt = (void *)ttm; + + if (gtt == NULL) + return NULL; + + return gtt->usermm; +} + +bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, + unsigned long end) { struct amdgpu_ttm_tt *gtt = (void *)ttm; + unsigned long size; if (gtt == NULL) return false; - return !!gtt->userptr; + if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr) + return false; + + size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; + if (gtt->userptr > end || gtt->userptr + size <= start) + return false; + + return true; } bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) @@ -996,9 +1028,10 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, struct fence **fence) { struct amdgpu_device *adev = ring->adev; + struct amdgpu_job *job; + uint32_t max_bytes; unsigned num_loops, num_dw; - struct amdgpu_ib *ib; unsigned i; int r; @@ -1010,20 +1043,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, while (num_dw & 0x7) num_dw++; - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) - return -ENOMEM; - - r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib); - if (r) { - kfree(ib); + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); + if (r) return r; - } - - ib->length_dw = 0; if (resv) { - r = amdgpu_sync_resv(adev, &ib->sync, resv, + r = amdgpu_sync_resv(adev, &job->sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED); if (r) { DRM_ERROR("sync failed (%d).\n", r); @@ -1034,31 +1059,25 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, for (i = 0; i < 
num_loops; i++) { uint32_t cur_size_in_bytes = min(byte_count, max_bytes); - amdgpu_emit_copy_buffer(adev, ib, src_offset, dst_offset, - cur_size_in_bytes); + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset, + dst_offset, cur_size_in_bytes); src_offset += cur_size_in_bytes; dst_offset += cur_size_in_bytes; byte_count -= cur_size_in_bytes; } - amdgpu_vm_pad_ib(adev, ib); - WARN_ON(ib->length_dw > num_dw); - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_vm_free_job, - AMDGPU_FENCE_OWNER_UNDEFINED, - fence); + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + WARN_ON(job->ibs[0].length_dw > num_dw); + r = amdgpu_job_submit(job, ring, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, fence); if (r) goto error_free; - if (!amdgpu_enable_scheduler) { - amdgpu_ib_free(adev, ib); - kfree(ib); - } return 0; + error_free: - amdgpu_ib_free(adev, ib); - kfree(ib); + amdgpu_job_free(job); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 53f987aeeacf..1de82bf4fc79 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -91,6 +91,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work); int amdgpu_uvd_sw_init(struct amdgpu_device *adev) { + struct amdgpu_ring *ring; + struct amd_sched_rq *rq; unsigned long bo_size; const char *fw_name; const struct common_firmware_header *hdr; @@ -191,6 +193,15 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->uvd.vcpu_bo); + ring = &adev->uvd.ring; + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; + r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity, + rq, amdgpu_sched_jobs); + if (r != 0) { + DRM_ERROR("Failed setting up UVD run queue.\n"); + return r; + } + for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { atomic_set(&adev->uvd.handles[i], 0); adev->uvd.filp[i] = NULL; @@ -210,6 +221,8 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) if (adev->uvd.vcpu_bo == NULL) return 0; + amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); + r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); if (!r) { amdgpu_bo_kunmap(adev->uvd.vcpu_bo); @@ -241,7 +254,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) amdgpu_uvd_note_usage(adev); - r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence); + r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence); if (r) { DRM_ERROR("Error destroying UVD (%d)!\n", r); continue; @@ -295,7 +308,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) amdgpu_uvd_note_usage(adev); - r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence); + r = amdgpu_uvd_get_destroy_msg(ring, handle, + false, &fence); if (r) { DRM_ERROR("Error destroying UVD (%d)!\n", r); continue; @@ -616,7 +630,6 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) { struct amdgpu_bo_va_mapping *mapping; struct amdgpu_bo *bo; - struct amdgpu_ib *ib; uint32_t cmd, lo, hi; uint64_t start, end; uint64_t addr; @@ -638,9 +651,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE; start += addr; - ib = &ctx->parser->ibs[ctx->ib_idx]; - ib->ptr[ctx->data0] = start & 0xFFFFFFFF; - ib->ptr[ctx->data1] = start >> 32; + amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0, + lower_32_bits(start)); + amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1, + upper_32_bits(start)); cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1; if (cmd < 0x4) { @@ 
-702,7 +716,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx, int (*cb)(struct amdgpu_uvd_cs_ctx *ctx)) { - struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx]; + struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx]; int i, r; ctx->idx++; @@ -748,7 +762,7 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx, static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx, int (*cb)(struct amdgpu_uvd_cs_ctx *ctx)) { - struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx]; + struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx]; int r; for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) { @@ -790,7 +804,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) [0x00000003] = 2048, [0x00000004] = 0xFFFFFFFF, }; - struct amdgpu_ib *ib = &parser->ibs[ib_idx]; + struct amdgpu_ib *ib = &parser->job->ibs[ib_idx]; int r; if (ib->length_dw % 16) { @@ -823,22 +837,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) return 0; } -static int amdgpu_uvd_free_job( - struct amdgpu_job *job) -{ - amdgpu_ib_free(job->adev, job->ibs); - kfree(job->ibs); - return 0; -} - -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, - struct amdgpu_bo *bo, - struct fence **fence) +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, + bool direct, struct fence **fence) { struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; struct list_head head; - struct amdgpu_ib *ib = NULL; + struct amdgpu_job *job; + struct amdgpu_ib *ib; struct fence *f = NULL; struct amdgpu_device *adev = ring->adev; uint64_t addr; @@ -862,15 +868,12 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); if (r) goto err; - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) { - r = -ENOMEM; - goto err; - } - r = amdgpu_ib_get(ring, NULL, 64, ib); + + r = amdgpu_job_alloc_with_ib(adev, 64, &job); if (r) - goto err1; + goto err; + ib = &job->ibs[0]; addr = amdgpu_bo_gpu_offset(bo); ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0); ib->ptr[1] = addr; @@ -882,12 +885,19 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, ib->ptr[i] = PACKET2(0); ib->length_dw = 16; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_uvd_free_job, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); - if (r) - goto err2; + if (direct) { + r = amdgpu_ib_schedule(ring, 1, ib, + AMDGPU_FENCE_OWNER_UNDEFINED, NULL, &f); + if (r) + goto err_free; + + amdgpu_job_free(job); + } else { + r = amdgpu_job_submit(job, ring, &adev->uvd.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, &f); + if (r) + goto err_free; + } ttm_eu_fence_buffer_objects(&ticket, &head, f); @@ -895,16 +905,12 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, *fence = fence_get(f); amdgpu_bo_unref(&bo); fence_put(f); - if (amdgpu_enable_scheduler) - return 0; - amdgpu_ib_free(ring->adev, ib); - kfree(ib); return 0; -err2: - amdgpu_ib_free(ring->adev, ib); -err1: - kfree(ib); + +err_free: + amdgpu_job_free(job); + err: ttm_eu_backoff_reservation(&ticket, &head); return r; @@ -959,11 +965,11 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, amdgpu_bo_kunmap(bo); amdgpu_bo_unreserve(bo); - return amdgpu_uvd_send_msg(ring, bo, fence); + return amdgpu_uvd_send_msg(ring, bo, true, fence); } int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct fence **fence) + bool direct, struct 
fence **fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_bo *bo; @@ -1001,7 +1007,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, amdgpu_bo_kunmap(bo); amdgpu_bo_unreserve(bo); - return amdgpu_uvd_send_msg(ring, bo, fence); + return amdgpu_uvd_send_msg(ring, bo, direct, fence); } static void amdgpu_uvd_idle_work_handler(struct work_struct *work) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 1724c2c86151..9a3b449081a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -31,7 +31,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev); int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, struct fence **fence); int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct fence **fence); + bool direct, struct fence **fence); void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index a745eeeb5d82..39c3aa60381a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -74,6 +74,8 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work); */ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) { + struct amdgpu_ring *ring; + struct amd_sched_rq *rq; const char *fw_name; const struct common_firmware_header *hdr; unsigned ucode_version, version_major, version_minor, binary_id; @@ -170,6 +172,16 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) return r; } + + ring = &adev->vce.ring[0]; + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; + r = amd_sched_entity_init(&ring->sched, &adev->vce.entity, + rq, amdgpu_sched_jobs); + if (r != 0) { + DRM_ERROR("Failed setting up VCE run queue.\n"); + return r; + } + for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { atomic_set(&adev->vce.handles[i], 0); adev->vce.filp[i] = NULL; @@ -190,6 +202,8 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) if (adev->vce.vcpu_bo == NULL) return 0; + amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity); + amdgpu_bo_unref(&adev->vce.vcpu_bo); amdgpu_ring_fini(&adev->vce.ring[0]); @@ -337,7 +351,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) amdgpu_vce_note_usage(adev); - r = amdgpu_vce_get_destroy_msg(ring, handle, NULL); + r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL); if (r) DRM_ERROR("Error destroying VCE handle (%d)!\n", r); @@ -346,14 +360,6 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) } } -static int amdgpu_vce_free_job( - struct amdgpu_job *job) -{ - amdgpu_ib_free(job->adev, job->ibs); - kfree(job->ibs); - return 0; -} - /** * amdgpu_vce_get_create_msg - generate a VCE create msg * @@ -368,21 +374,17 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, struct fence **fence) { const unsigned ib_size_dw = 1024; - struct amdgpu_ib *ib = NULL; + struct amdgpu_job *job; + struct amdgpu_ib *ib; struct fence *f = NULL; - struct amdgpu_device *adev = ring->adev; uint64_t dummy; int i, r; - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) - return -ENOMEM; - r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); - kfree(ib); + r = 
amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + if (r) return r; - } + + ib = &job->ibs[0]; dummy = ib->gpu_addr + 1024; @@ -423,20 +425,19 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_vce_free_job, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); + r = amdgpu_ib_schedule(ring, 1, ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); if (r) goto err; + + amdgpu_job_free(job); if (fence) *fence = fence_get(f); fence_put(f); - if (amdgpu_enable_scheduler) - return 0; + return 0; + err: - amdgpu_ib_free(adev, ib); - kfree(ib); + amdgpu_job_free(job); return r; } @@ -451,26 +452,20 @@ err: * Close up a stream for HW test or if userspace failed to do so */ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct fence **fence) + bool direct, struct fence **fence) { const unsigned ib_size_dw = 1024; - struct amdgpu_ib *ib = NULL; + struct amdgpu_job *job; + struct amdgpu_ib *ib; struct fence *f = NULL; - struct amdgpu_device *adev = ring->adev; uint64_t dummy; int i, r; - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) - return -ENOMEM; - - r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib); - if (r) { - kfree(ib); - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + if (r) return r; - } + ib = &job->ibs[0]; dummy = ib->gpu_addr + 1024; /* stitch together an VCE destroy msg */ @@ -490,20 +485,29 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_vce_free_job, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); - if (r) - goto err; + + if (direct) { + r = amdgpu_ib_schedule(ring, 1, ib, + AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); + if (r) + goto err; + + amdgpu_job_free(job); + } else { + r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, &f); + if (r) + goto err; + } + if (fence) *fence = fence_get(f); fence_put(f); - if (amdgpu_enable_scheduler) - return 0; + return 0; + err: - amdgpu_ib_free(adev, ib); - kfree(ib); + amdgpu_job_free(job); return r; } @@ -521,7 +525,6 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi, unsigned size, uint32_t index) { struct amdgpu_bo_va_mapping *mapping; - struct amdgpu_ib *ib = &p->ibs[ib_idx]; struct amdgpu_bo *bo; uint64_t addr; @@ -550,8 +553,8 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, addr += amdgpu_bo_gpu_offset(bo); addr -= ((uint64_t)size) * ((uint64_t)index); - ib->ptr[lo] = addr & 0xFFFFFFFF; - ib->ptr[hi] = addr >> 32; + amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr)); + amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr)); return 0; } @@ -606,7 +609,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, */ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) { - struct amdgpu_ib *ib = &p->ibs[ib_idx]; + struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; unsigned fb_idx = 0, bs_idx = 0; int session_idx = -1; bool destroyed = false; @@ -743,30 +746,6 @@ out: } /** - * amdgpu_vce_ring_emit_semaphore - emit a semaphore command - * - * @ring: engine to use - * @semaphore: address of semaphore - * @emit_wait: true=emit wait, false=emit signal - * - */ -bool 
amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - - amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE); - amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); - amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); - amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0)); - if (!emit_wait) - amdgpu_ring_write(ring, VCE_CMD_END); - - return true; -} - -/** * amdgpu_vce_ring_emit_ib - execute indirect buffer * * @ring: engine to use @@ -814,14 +793,14 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; - r = amdgpu_ring_lock(ring, 16); + r = amdgpu_ring_alloc(ring, 16); if (r) { DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n", ring->idx, r); return r; } amdgpu_ring_write(ring, VCE_CMD_END); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { if (amdgpu_ring_get_rptr(ring) != rptr) @@ -862,7 +841,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_vce_get_destroy_msg(ring, 1, &fence); + r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence); if (r) { DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index ba2da8ee5906..ef99d2370182 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -31,12 +31,9 @@ int amdgpu_vce_resume(struct amdgpu_device *adev); int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, struct fence **fence); int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct fence **fence); + bool direct, struct fence **fence); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); -bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait); void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9599f7559b3d..264c5968a1d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -55,7 +55,7 @@ * * @adev: amdgpu_device pointer * - * Calculate the number of page directory entries (cayman+). + * Calculate the number of page directory entries. */ static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev) { @@ -67,7 +67,7 @@ static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev) * * @adev: amdgpu_device pointer * - * Calculate the size of the page directory in bytes (cayman+). + * Calculate the size of the page directory in bytes. 
*/ static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) { @@ -89,8 +89,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct amdgpu_bo_list_entry *entry) { entry->robj = vm->page_directory; - entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; - entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; entry->priority = 0; entry->tv.bo = &vm->page_directory->tbo; entry->tv.shared = true; @@ -154,29 +152,34 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, * @vm: vm to allocate id for * @ring: ring we want to submit job to * @sync: sync object where we add dependencies + * @fence: fence protecting ID from reuse * * Allocate an id for the vm, adding fences to the sync obj as necessary. - * - * Global mutex must be locked! */ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync) + struct amdgpu_sync *sync, struct fence *fence) { - struct fence *best[AMDGPU_MAX_RINGS] = {}; struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; struct amdgpu_device *adev = ring->adev; + struct amdgpu_vm_manager_id *id; + int r; - unsigned choices[2] = {}; - unsigned i; + mutex_lock(&adev->vm_manager.lock); /* check if the id is still valid */ if (vm_id->id) { - unsigned id = vm_id->id; long owner; - owner = atomic_long_read(&adev->vm_manager.ids[id].owner); + id = &adev->vm_manager.ids[vm_id->id]; + owner = atomic_long_read(&id->owner); if (owner == (long)vm) { - trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); + list_move_tail(&id->list, &adev->vm_manager.ids_lru); + trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx); + + fence_put(id->active); + id->active = fence_get(fence); + + mutex_unlock(&adev->vm_manager.lock); return 0; } } @@ -184,41 +187,24 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, /* we definately need to flush */ vm_id->pd_gpu_addr = ~0ll; - /* skip over VMID 0, since it is the system VM */ - for (i = 1; i < adev->vm_manager.nvm; ++i) { - struct fence *fence = adev->vm_manager.ids[i].active; - struct amdgpu_ring *fring; - - if (fence == NULL) { - /* found a free one */ - vm_id->id = i; - trace_amdgpu_vm_grab_id(i, ring->idx); - return 0; - } - - fring = amdgpu_ring_from_fence(fence); - if (best[fring->idx] == NULL || - fence_is_later(best[fring->idx], fence)) { - best[fring->idx] = fence; - choices[fring == ring ? 0 : 1] = i; - } - } + id = list_first_entry(&adev->vm_manager.ids_lru, + struct amdgpu_vm_manager_id, + list); + list_move_tail(&id->list, &adev->vm_manager.ids_lru); + atomic_long_set(&id->owner, (long)vm); - for (i = 0; i < 2; ++i) { - if (choices[i]) { - struct fence *fence; + vm_id->id = id - adev->vm_manager.ids; + trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx); - fence = adev->vm_manager.ids[choices[i]].active; - vm_id->id = choices[i]; + r = amdgpu_sync_fence(ring->adev, sync, id->active); - trace_amdgpu_vm_grab_id(choices[i], ring->idx); - return amdgpu_sync_fence(ring->adev, sync, fence); - } + if (!r) { + fence_put(id->active); + id->active = fence_get(fence); } - /* should never happen */ - BUG(); - return -EINVAL; + mutex_unlock(&adev->vm_manager.lock); + return r; } /** @@ -228,9 +214,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, * @vm: vm we want to flush * @updates: last vm update that we waited for * - * Flush the vm (cayman+). - * - * Global and local mutex must be locked! + * Flush the vm. 
*/ void amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_vm *vm, @@ -260,36 +244,12 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring, } /** - * amdgpu_vm_fence - remember fence for vm - * - * @adev: amdgpu_device pointer - * @vm: vm we want to fence - * @fence: fence to remember - * - * Fence the vm (cayman+). - * Set the fence used to protect page table and id. - * - * Global and local mutex must be locked! - */ -void amdgpu_vm_fence(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct fence *fence) -{ - struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence); - unsigned vm_id = vm->ids[ring->idx].id; - - fence_put(adev->vm_manager.ids[vm_id].active); - adev->vm_manager.ids[vm_id].active = fence_get(fence); - atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm); -} - -/** * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo * * @vm: requested vm * @bo: requested buffer object * - * Find @bo inside the requested vm (cayman+). + * Find @bo inside the requested vm. * Search inside the @bos vm list for the requested vm * Returns the found bo_va or NULL if none is found * @@ -312,32 +272,40 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, * amdgpu_vm_update_pages - helper to call the right asic function * * @adev: amdgpu_device pointer + * @gtt: GART instance to use for mapping + * @gtt_flags: GTT hw access flags * @ib: indirect buffer to fill with commands * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes * @flags: hw access flags - * @gtt_flags: GTT hw access flags * * Traces the parameters and calls the right asic functions * to setup the page table using the DMA. */ static void amdgpu_vm_update_pages(struct amdgpu_device *adev, + struct amdgpu_gart *gtt, + uint32_t gtt_flags, struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, - uint32_t flags, uint32_t gtt_flags) + uint32_t flags) { trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); - if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) { - uint64_t src = adev->gart.table_addr + (addr >> 12) * 8; + if ((gtt == &adev->gart) && (flags == gtt_flags)) { + uint64_t src = gtt->table_addr + (addr >> 12) * 8; amdgpu_vm_copy_pte(adev, ib, pe, src, count); - } else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) { - amdgpu_vm_write_pte(adev, ib, pe, addr, - count, incr, flags); + } else if (gtt) { + dma_addr_t *pages_addr = gtt->pages_addr; + amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr, + count, incr, flags); + + } else if (count < 3) { + amdgpu_vm_write_pte(adev, ib, NULL, pe, addr, + count, incr, flags); } else { amdgpu_vm_set_pte_pde(adev, ib, pe, addr, @@ -345,15 +313,6 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev, } } -int amdgpu_vm_free_job(struct amdgpu_job *job) -{ - int i; - for (i = 0; i < job->num_ibs; i++) - amdgpu_ib_free(job->adev, &job->ibs[i]); - kfree(job->ibs); - return 0; -} - /** * amdgpu_vm_clear_bo - initially clear the page dir/table * @@ -363,15 +322,18 @@ int amdgpu_vm_free_job(struct amdgpu_job *job) * need to reserve bo first before calling it. 
*/ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, + struct amdgpu_vm *vm, struct amdgpu_bo *bo) { - struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; + struct amdgpu_ring *ring; struct fence *fence = NULL; - struct amdgpu_ib *ib; + struct amdgpu_job *job; unsigned entries; uint64_t addr; int r; + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); + r = reservation_object_reserve_shared(bo->tbo.resv); if (r) return r; @@ -383,56 +345,57 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, addr = amdgpu_bo_gpu_offset(bo); entries = amdgpu_bo_size(bo) / 8; - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) + r = amdgpu_job_alloc_with_ib(adev, 64, &job); + if (r) goto error; - r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); + amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries, + 0, 0); + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + + WARN_ON(job->ibs[0].length_dw > 64); + r = amdgpu_job_submit(job, ring, &vm->entity, + AMDGPU_FENCE_OWNER_VM, &fence); if (r) goto error_free; - ib->length_dw = 0; - - amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0); - amdgpu_vm_pad_ib(adev, ib); - WARN_ON(ib->length_dw > 64); - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_vm_free_job, - AMDGPU_FENCE_OWNER_VM, - &fence); - if (!r) - amdgpu_bo_fence(bo, fence, true); + amdgpu_bo_fence(bo, fence, true); fence_put(fence); - if (amdgpu_enable_scheduler) - return 0; + return 0; error_free: - amdgpu_ib_free(adev, ib); - kfree(ib); + amdgpu_job_free(job); error: return r; } /** - * amdgpu_vm_map_gart - get the physical address of a gart page + * amdgpu_vm_map_gart - Resolve gart mapping of addr * - * @adev: amdgpu_device pointer + * @pages_addr: optional DMA address to use for lookup * @addr: the unmapped addr * * Look up the physical address of the page that the pte resolves - * to (cayman+). - * Returns the physical address of the page. + * to and return the pointer for the page table entry. */ -uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr) +uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) { uint64_t result; - /* page table offset */ - result = adev->gart.pages_addr[addr >> PAGE_SHIFT]; + if (pages_addr) { + /* page table offset */ + result = pages_addr[addr >> PAGE_SHIFT]; - /* in case cpu page size != gpu page size*/ - result |= addr & (~PAGE_MASK); + /* in case cpu page size != gpu page size*/ + result |= addr & (~PAGE_MASK); + + } else { + /* No mapping required */ + result = addr; + } + + result &= 0xFFFFFFFFFFFFF000ULL; return result; } @@ -446,45 +409,37 @@ uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr) * @end: end of GPU address range * * Allocates new page tables if necessary - * and updates the page directory (cayman+). + * and updates the page directory. * Returns 0 for success, error for failure. - * - * Global and local mutex must be locked! */ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; + struct amdgpu_ring *ring; struct amdgpu_bo *pd = vm->page_directory; uint64_t pd_addr = amdgpu_bo_gpu_offset(pd); uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; uint64_t last_pde = ~0, last_pt = ~0; unsigned count = 0, pt_idx, ndw; + struct amdgpu_job *job; struct amdgpu_ib *ib; struct fence *fence = NULL; int r; + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); + /* padding, etc. 
*/ ndw = 64; /* assume the worst case */ ndw += vm->max_pde_used * 6; - /* update too big for an IB */ - if (ndw > 0xfffff) - return -ENOMEM; - - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) - return -ENOMEM; - - r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); - if (r) { - kfree(ib); + r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); + if (r) return r; - } - ib->length_dw = 0; + + ib = &job->ibs[0]; /* walk over the address space and update the page directory */ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { @@ -504,9 +459,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, ((last_pt + incr * count) != pt)) { if (count) { - amdgpu_vm_update_pages(adev, ib, last_pde, - last_pt, count, incr, - AMDGPU_PTE_VALID, 0); + amdgpu_vm_update_pages(adev, NULL, 0, ib, + last_pde, last_pt, + count, incr, + AMDGPU_PTE_VALID); } count = 1; @@ -518,17 +474,16 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, } if (count) - amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count, - incr, AMDGPU_PTE_VALID, 0); + amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt, + count, incr, AMDGPU_PTE_VALID); if (ib->length_dw != 0) { - amdgpu_vm_pad_ib(adev, ib); - amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); + amdgpu_ring_pad_ib(ring, ib); + amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, + AMDGPU_FENCE_OWNER_VM); WARN_ON(ib->length_dw > ndw); - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_vm_free_job, - AMDGPU_FENCE_OWNER_VM, - &fence); + r = amdgpu_job_submit(job, ring, &vm->entity, + AMDGPU_FENCE_OWNER_VM, &fence); if (r) goto error_free; @@ -536,18 +491,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, fence_put(vm->page_directory_fence); vm->page_directory_fence = fence_get(fence); fence_put(fence); - } - if (!amdgpu_enable_scheduler || ib->length_dw == 0) { - amdgpu_ib_free(adev, ib); - kfree(ib); + } else { + amdgpu_job_free(job); } return 0; error_free: - amdgpu_ib_free(adev, ib); - kfree(ib); + amdgpu_job_free(job); return r; } @@ -555,20 +507,20 @@ error_free: * amdgpu_vm_frag_ptes - add fragment information to PTEs * * @adev: amdgpu_device pointer + * @gtt: GART instance to use for mapping + * @gtt_flags: GTT hw mapping flags * @ib: IB for the update * @pe_start: first PTE to handle * @pe_end: last PTE to handle * @addr: addr those PTEs should point to * @flags: hw mapping flags - * @gtt_flags: GTT hw mapping flags - * - * Global and local mutex must be locked! 
*/ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, + struct amdgpu_gart *gtt, + uint32_t gtt_flags, struct amdgpu_ib *ib, uint64_t pe_start, uint64_t pe_end, - uint64_t addr, uint32_t flags, - uint32_t gtt_flags) + uint64_t addr, uint32_t flags) { /** * The MC L1 TLB supports variable sized pages, based on a fragment @@ -598,36 +550,39 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, unsigned count; + /* Abort early if there isn't anything to do */ + if (pe_start == pe_end) + return; + /* system pages are non continuously */ - if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) || - (frag_start >= frag_end)) { + if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { count = (pe_end - pe_start) / 8; - amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); + amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start, + addr, count, AMDGPU_GPU_PAGE_SIZE, + flags); return; } /* handle the 4K area at the beginning */ if (pe_start != frag_start) { count = (frag_start - pe_start) / 8; - amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); + amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr, + count, AMDGPU_GPU_PAGE_SIZE, flags); addr += AMDGPU_GPU_PAGE_SIZE * count; } /* handle the area in the middle */ count = (frag_end - frag_start) / 8; - amdgpu_vm_update_pages(adev, ib, frag_start, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags | frag_flags, - gtt_flags); + amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count, + AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); /* handle the 4K area at the end */ if (frag_end != pe_end) { addr += AMDGPU_GPU_PAGE_SIZE * count; count = (pe_end - frag_end) / 8; - amdgpu_vm_update_pages(adev, ib, frag_end, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); + amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr, + count, AMDGPU_GPU_PAGE_SIZE, flags); } } @@ -635,122 +590,105 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * amdgpu_vm_update_ptes - make sure that page tables are valid * * @adev: amdgpu_device pointer + * @gtt: GART instance to use for mapping + * @gtt_flags: GTT hw mapping flags * @vm: requested vm * @start: start of GPU address range * @end: end of GPU address range * @dst: destination address to map to * @flags: mapping flags * - * Update the page tables in the range @start - @end (cayman+). - * - * Global and local mutex must be locked! + * Update the page tables in the range @start - @end. 
*/ -static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_ib *ib, - uint64_t start, uint64_t end, - uint64_t dst, uint32_t flags, - uint32_t gtt_flags) +static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, + struct amdgpu_gart *gtt, + uint32_t gtt_flags, + struct amdgpu_vm *vm, + struct amdgpu_ib *ib, + uint64_t start, uint64_t end, + uint64_t dst, uint32_t flags) { - uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; - uint64_t last_pte = ~0, last_dst = ~0; - void *owner = AMDGPU_FENCE_OWNER_VM; - unsigned count = 0; - uint64_t addr; + const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; - /* sync to everything on unmapping */ - if (!(flags & AMDGPU_PTE_VALID)) - owner = AMDGPU_FENCE_OWNER_UNDEFINED; + uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0; + uint64_t addr; /* walk over the address space and update the page tables */ for (addr = start; addr < end; ) { uint64_t pt_idx = addr >> amdgpu_vm_block_size; struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj; unsigned nptes; - uint64_t pte; - int r; - - amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner); - r = reservation_object_reserve_shared(pt->tbo.resv); - if (r) - return r; + uint64_t pe_start; if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); - pte = amdgpu_bo_gpu_offset(pt); - pte += (addr & mask) * 8; + pe_start = amdgpu_bo_gpu_offset(pt); + pe_start += (addr & mask) * 8; - if ((last_pte + 8 * count) != pte) { + if (last_pe_end != pe_start) { - if (count) { - amdgpu_vm_frag_ptes(adev, ib, last_pte, - last_pte + 8 * count, - last_dst, flags, - gtt_flags); - } + amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib, + last_pe_start, last_pe_end, + last_dst, flags); - count = nptes; - last_pte = pte; + last_pe_start = pe_start; + last_pe_end = pe_start + 8 * nptes; last_dst = dst; } else { - count += nptes; + last_pe_end += 8 * nptes; } addr += nptes; dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - if (count) { - amdgpu_vm_frag_ptes(adev, ib, last_pte, - last_pte + 8 * count, - last_dst, flags, gtt_flags); - } - - return 0; + amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib, + last_pe_start, last_pe_end, + last_dst, flags); } /** * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * * @adev: amdgpu_device pointer + * @gtt: GART instance to use for mapping + * @gtt_flags: flags as they are used for GTT * @vm: requested vm - * @mapping: mapped range and flags to use for the update + * @start: start of mapped range + * @last: last mapped entry + * @flags: flags for the entries * @addr: addr to set the area to - * @gtt_flags: flags as they are used for GTT * @fence: optional resulting fence * - * Fill in the page table entries for @mapping. + * Fill in the page table entries between @start and @last. * Returns 0 for success, -EINVAL for failure. - * - * Object have to be reserved and mutex must be locked! 
*/ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, + struct amdgpu_gart *gtt, + uint32_t gtt_flags, struct amdgpu_vm *vm, - struct amdgpu_bo_va_mapping *mapping, - uint64_t addr, uint32_t gtt_flags, + uint64_t start, uint64_t last, + uint32_t flags, uint64_t addr, struct fence **fence) { - struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; + struct amdgpu_ring *ring; + void *owner = AMDGPU_FENCE_OWNER_VM; unsigned nptes, ncmds, ndw; - uint32_t flags = gtt_flags; + struct amdgpu_job *job; struct amdgpu_ib *ib; struct fence *f = NULL; int r; - /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here - * but in case of something, we filter the flags in first place - */ - if (!(mapping->flags & AMDGPU_PTE_READABLE)) - flags &= ~AMDGPU_PTE_READABLE; - if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) - flags &= ~AMDGPU_PTE_WRITEABLE; + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); - trace_amdgpu_vm_bo_update(mapping); + /* sync to everything on unmapping */ + if (!(flags & AMDGPU_PTE_VALID)) + owner = AMDGPU_FENCE_OWNER_UNDEFINED; - nptes = mapping->it.last - mapping->it.start + 1; + nptes = last - start + 1; /* * reserve space for one command every (1 << BLOCK_SIZE) @@ -761,11 +699,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* padding, etc. */ ndw = 64; - if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) { + if ((gtt == &adev->gart) && (flags == gtt_flags)) { /* only copy commands needed */ ndw += ncmds * 7; - } else if (flags & AMDGPU_PTE_SYSTEM) { + } else if (gtt) { /* header for write data commands */ ndw += ncmds * 4; @@ -780,38 +718,28 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, ndw += 2 * 10; } - /* update too big for an IB */ - if (ndw > 0xfffff) - return -ENOMEM; - - ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); - if (!ib) - return -ENOMEM; - - r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); - if (r) { - kfree(ib); + r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); + if (r) return r; - } - ib->length_dw = 0; + ib = &job->ibs[0]; - r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start, - mapping->it.last + 1, addr + mapping->offset, - flags, gtt_flags); + r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, + owner); + if (r) + goto error_free; - if (r) { - amdgpu_ib_free(adev, ib); - kfree(ib); - return r; - } + r = reservation_object_reserve_shared(vm->page_directory->tbo.resv); + if (r) + goto error_free; + + amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1, + addr, flags); - amdgpu_vm_pad_ib(adev, ib); + amdgpu_ring_pad_ib(ring, ib); WARN_ON(ib->length_dw > ndw); - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, - &amdgpu_vm_free_job, - AMDGPU_FENCE_OWNER_VM, - &f); + r = amdgpu_job_submit(job, ring, &vm->entity, + AMDGPU_FENCE_OWNER_VM, &f); if (r) goto error_free; @@ -821,19 +749,76 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, *fence = fence_get(f); } fence_put(f); - if (!amdgpu_enable_scheduler) { - amdgpu_ib_free(adev, ib); - kfree(ib); - } return 0; error_free: - amdgpu_ib_free(adev, ib); - kfree(ib); + amdgpu_job_free(job); return r; } /** + * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks + * + * @adev: amdgpu_device pointer + * @gtt: GART instance to use for mapping + * @vm: requested vm + * @mapping: mapped range and flags to use for the update + * @addr: addr to set the area to + * @gtt_flags: flags as they are used for GTT + * @fence: optional 
resulting fence + * + * Split the mapping into smaller chunks so that each update fits + * into a SDMA IB. + * Returns 0 for success, -EINVAL for failure. + */ +static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, + struct amdgpu_gart *gtt, + uint32_t gtt_flags, + struct amdgpu_vm *vm, + struct amdgpu_bo_va_mapping *mapping, + uint64_t addr, struct fence **fence) +{ + const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE; + + uint64_t start = mapping->it.start; + uint32_t flags = gtt_flags; + int r; + + /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here + * but in case of something, we filter the flags in first place + */ + if (!(mapping->flags & AMDGPU_PTE_READABLE)) + flags &= ~AMDGPU_PTE_READABLE; + if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) + flags &= ~AMDGPU_PTE_WRITEABLE; + + trace_amdgpu_vm_bo_update(mapping); + + addr += mapping->offset; + + if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags))) + return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm, + start, mapping->it.last, + flags, addr, fence); + + while (start != mapping->it.last + 1) { + uint64_t last; + + last = min((uint64_t)mapping->it.last, start + max_size); + r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm, + start, last, flags, addr, + fence); + if (r) + return r; + + start = last + 1; + addr += max_size; + } + + return 0; +} + +/** * amdgpu_vm_bo_update - update all BO mappings in the vm page table * * @adev: amdgpu_device pointer @@ -851,14 +836,25 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, { struct amdgpu_vm *vm = bo_va->vm; struct amdgpu_bo_va_mapping *mapping; + struct amdgpu_gart *gtt = NULL; uint32_t flags; uint64_t addr; int r; if (mem) { addr = (u64)mem->start << PAGE_SHIFT; - if (mem->mem_type != TTM_PL_TT) + switch (mem->mem_type) { + case TTM_PL_TT: + gtt = &bo_va->bo->adev->gart; + break; + + case TTM_PL_VRAM: addr += adev->vm_manager.vram_base_offset; + break; + + default: + break; + } } else { addr = 0; } @@ -871,8 +867,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_unlock(&vm->status_lock); list_for_each_entry(mapping, &bo_va->invalids, list) { - r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr, - flags, &bo_va->last_pt_update); + r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr, + &bo_va->last_pt_update); if (r) return r; } @@ -918,7 +914,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); spin_unlock(&vm->freed_lock); - r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); + r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping, + 0, NULL); kfree(mapping); if (r) return r; @@ -976,7 +973,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, * @vm: requested vm * @bo: amdgpu buffer object * - * Add @bo into the requested vm (cayman+). + * Add @bo into the requested vm. 
* Add @bo to the list of bos associated with the vm * Returns newly added bo_va or NULL for failure * @@ -1117,15 +1114,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, */ pt->parent = amdgpu_bo_ref(vm->page_directory); - r = amdgpu_vm_clear_bo(adev, pt); + r = amdgpu_vm_clear_bo(adev, vm, pt); if (r) { amdgpu_bo_unref(&pt); goto error_free; } entry->robj = pt; - entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; - entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; entry->priority = 0; entry->tv.bo = &entry->robj->tbo; entry->tv.shared = true; @@ -1210,7 +1205,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, * @adev: amdgpu_device pointer * @bo_va: requested bo_va * - * Remove @bo_va->bo from the requested vm (cayman+). + * Remove @bo_va->bo from the requested vm. * * Object have to be reserved! */ @@ -1255,7 +1250,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, * @vm: requested vm * @bo: amdgpu buffer object * - * Mark @bo as invalid (cayman+). + * Mark @bo as invalid. */ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo) @@ -1276,13 +1271,16 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, * @adev: amdgpu_device pointer * @vm: requested vm * - * Init @vm fields (cayman+). + * Init @vm fields. */ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) { const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, AMDGPU_VM_PTE_COUNT * 8); unsigned pd_size, pd_entries; + unsigned ring_instance; + struct amdgpu_ring *ring; + struct amd_sched_rq *rq; int i, r; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { @@ -1306,6 +1304,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) return -ENOMEM; } + /* create scheduler entity for page table updates */ + + ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring); + ring_instance %= adev->vm_manager.vm_pte_num_rings; + ring = adev->vm_manager.vm_pte_rings[ring_instance]; + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; + r = amd_sched_entity_init(&ring->sched, &vm->entity, + rq, amdgpu_sched_jobs); + if (r) + return r; + vm->page_directory_fence = NULL; r = amdgpu_bo_create(adev, pd_size, align, true, @@ -1313,22 +1322,27 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) AMDGPU_GEM_CREATE_NO_CPU_ACCESS, NULL, NULL, &vm->page_directory); if (r) - return r; + goto error_free_sched_entity; + r = amdgpu_bo_reserve(vm->page_directory, false); - if (r) { - amdgpu_bo_unref(&vm->page_directory); - vm->page_directory = NULL; - return r; - } - r = amdgpu_vm_clear_bo(adev, vm->page_directory); + if (r) + goto error_free_page_directory; + + r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory); amdgpu_bo_unreserve(vm->page_directory); - if (r) { - amdgpu_bo_unref(&vm->page_directory); - vm->page_directory = NULL; - return r; - } + if (r) + goto error_free_page_directory; return 0; + +error_free_page_directory: + amdgpu_bo_unref(&vm->page_directory); + vm->page_directory = NULL; + +error_free_sched_entity: + amd_sched_entity_fini(&ring->sched, &vm->entity); + + return r; } /** @@ -1337,7 +1351,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) * @adev: amdgpu_device pointer * @vm: requested vm * - * Tear down @vm (cayman+). + * Tear down @vm. 
* Unbind the VM and remove all bos from the vm bo list */ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) @@ -1345,6 +1359,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) struct amdgpu_bo_va_mapping *mapping, *tmp; int i; + amd_sched_entity_fini(vm->entity.sched, &vm->entity); + if (!RB_EMPTY_ROOT(&vm->va)) { dev_err(adev->dev, "still active bo inside vm\n"); } @@ -1375,6 +1391,27 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) } /** + * amdgpu_vm_manager_init - init the VM manager + * + * @adev: amdgpu_device pointer + * + * Initialize the VM manager structures + */ +void amdgpu_vm_manager_init(struct amdgpu_device *adev) +{ + unsigned i; + + INIT_LIST_HEAD(&adev->vm_manager.ids_lru); + + /* skip over VMID 0, since it is the system VM */ + for (i = 1; i < adev->vm_manager.num_ids; ++i) + list_add_tail(&adev->vm_manager.ids[i].list, + &adev->vm_manager.ids_lru); + + atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); +} + +/** * amdgpu_vm_manager_fini - cleanup VM manager * * @adev: amdgpu_device pointer diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index fd9c9588ef46..6b1f0539ce9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1059,257 +1059,6 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num, return -EINVAL; } -static void cik_print_gpu_status_regs(struct amdgpu_device *adev) -{ - dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", - RREG32(mmGRBM_STATUS)); - dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", - RREG32(mmGRBM_STATUS2)); - dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", - RREG32(mmGRBM_STATUS_SE0)); - dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", - RREG32(mmGRBM_STATUS_SE1)); - dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", - RREG32(mmGRBM_STATUS_SE2)); - dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", - RREG32(mmGRBM_STATUS_SE3)); - dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", - RREG32(mmSRBM_STATUS)); - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", - RREG32(mmSRBM_STATUS2)); - dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n", - RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET)); - dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n", - RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET)); - dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); - dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT1)); - dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT2)); - dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT3)); - dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", - RREG32(mmCP_CPF_BUSY_STAT)); - dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_CPF_STALLED_STAT1)); - dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); - dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); - dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_CPC_STALLED_STAT1)); - dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); -} - -/** - * cik_gpu_check_soft_reset - check which blocks are busy - * - * @adev: amdgpu_device pointer - * - * Check which blocks are busy and return the relevant reset - * mask to be used by cik_gpu_soft_reset(). - * Returns a mask of the blocks to be reset. 
- */ -u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev) -{ - u32 reset_mask = 0; - u32 tmp; - - /* GRBM_STATUS */ - tmp = RREG32(mmGRBM_STATUS); - if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | - GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | - GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | - GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | - GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | - GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) - reset_mask |= AMDGPU_RESET_GFX; - - if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) - reset_mask |= AMDGPU_RESET_CP; - - /* GRBM_STATUS2 */ - tmp = RREG32(mmGRBM_STATUS2); - if (tmp & GRBM_STATUS2__RLC_BUSY_MASK) - reset_mask |= AMDGPU_RESET_RLC; - - /* SDMA0_STATUS_REG */ - tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET); - if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) - reset_mask |= AMDGPU_RESET_DMA; - - /* SDMA1_STATUS_REG */ - tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET); - if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) - reset_mask |= AMDGPU_RESET_DMA1; - - /* SRBM_STATUS2 */ - tmp = RREG32(mmSRBM_STATUS2); - if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) - reset_mask |= AMDGPU_RESET_DMA; - - if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) - reset_mask |= AMDGPU_RESET_DMA1; - - /* SRBM_STATUS */ - tmp = RREG32(mmSRBM_STATUS); - - if (tmp & SRBM_STATUS__IH_BUSY_MASK) - reset_mask |= AMDGPU_RESET_IH; - - if (tmp & SRBM_STATUS__SEM_BUSY_MASK) - reset_mask |= AMDGPU_RESET_SEM; - - if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK) - reset_mask |= AMDGPU_RESET_GRBM; - - if (tmp & SRBM_STATUS__VMC_BUSY_MASK) - reset_mask |= AMDGPU_RESET_VMC; - - if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | - SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) - reset_mask |= AMDGPU_RESET_MC; - - if (amdgpu_display_is_display_hung(adev)) - reset_mask |= AMDGPU_RESET_DISPLAY; - - /* Skip MC reset as it's mostly likely not hung, just busy */ - if (reset_mask & AMDGPU_RESET_MC) { - DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); - reset_mask &= ~AMDGPU_RESET_MC; - } - - return reset_mask; -} - -/** - * cik_gpu_soft_reset - soft reset GPU - * - * @adev: amdgpu_device pointer - * @reset_mask: mask of which blocks to reset - * - * Soft reset the blocks specified in @reset_mask. 
- */ -static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask) -{ - struct amdgpu_mode_mc_save save; - u32 grbm_soft_reset = 0, srbm_soft_reset = 0; - u32 tmp; - - if (reset_mask == 0) - return; - - dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask); - - cik_print_gpu_status_regs(adev); - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); - - /* disable CG/PG */ - - /* stop the rlc */ - gfx_v7_0_rlc_stop(adev); - - /* Disable GFX parsing/prefetching */ - WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); - - /* Disable MEC parsing/prefetching */ - WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); - - if (reset_mask & AMDGPU_RESET_DMA) { - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp |= SDMA0_F32_CNTL__HALT_MASK; - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); - } - if (reset_mask & AMDGPU_RESET_DMA1) { - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp |= SDMA0_F32_CNTL__HALT_MASK; - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); - } - - gmc_v7_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { - dev_warn(adev->dev, "Wait for MC idle timedout !\n"); - } - - if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) - grbm_soft_reset = GRBM_SOFT_RESET__SOFT_RESET_CP_MASK | - GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK; - - if (reset_mask & AMDGPU_RESET_CP) { - grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK; - - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; - } - - if (reset_mask & AMDGPU_RESET_DMA) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; - - if (reset_mask & AMDGPU_RESET_DMA1) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; - - if (reset_mask & AMDGPU_RESET_DISPLAY) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; - - if (reset_mask & AMDGPU_RESET_RLC) - grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK; - - if (reset_mask & AMDGPU_RESET_SEM) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SEM_MASK; - - if (reset_mask & AMDGPU_RESET_IH) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK; - - if (reset_mask & AMDGPU_RESET_GRBM) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; - - if (reset_mask & AMDGPU_RESET_VMC) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK; - - if (!(adev->flags & AMD_IS_APU)) { - if (reset_mask & AMDGPU_RESET_MC) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK; - } - - if (grbm_soft_reset) { - tmp = RREG32(mmGRBM_SOFT_RESET); - tmp |= grbm_soft_reset; - dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~grbm_soft_reset; - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); - } - - if (srbm_soft_reset) { - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - } - - /* Wait a little for things to settle down */ - udelay(50); - - gmc_v7_0_mc_resume(adev, &save); - udelay(50); - - cik_print_gpu_status_regs(adev); -} - 
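/*
 * Editor's note (illustrative, not part of the patch): the status-register
 * dump and the two soft-reset helpers deleted above existed only to drive the
 * block-by-block soft-reset path.  With amdgpu_cik_gpu_check_soft_reset() and
 * cik_gpu_soft_reset() gone, the cik_asic_reset() hunk further below collapses
 * the CIK reset sequence to an unconditional PCI config reset; reassembled
 * from the +/- lines of that hunk, the resulting function reads:
 */
static int cik_asic_reset(struct amdgpu_device *adev)
{
	cik_set_bios_scratch_engine_hung(adev, true);

	cik_gpu_pci_config_reset(adev);

	cik_set_bios_scratch_engine_hung(adev, false);

	return 0;
}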
struct kv_reset_save_regs { u32 gmcon_reng_execute; u32 gmcon_misc; @@ -1405,45 +1154,11 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev, static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) { - struct amdgpu_mode_mc_save save; struct kv_reset_save_regs kv_save = { 0 }; - u32 tmp, i; + u32 i; dev_info(adev->dev, "GPU pci config reset\n"); - /* disable dpm? */ - - /* disable cg/pg */ - - /* Disable GFX parsing/prefetching */ - WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | - CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); - - /* Disable MEC parsing/prefetching */ - WREG32(mmCP_MEC_CNTL, - CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); - - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp |= SDMA0_F32_CNTL__HALT_MASK; - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp |= SDMA0_F32_CNTL__HALT_MASK; - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); - /* XXX other engines? */ - - /* halt the rlc, disable cp internal ints */ - gfx_v7_0_rlc_stop(adev); - - udelay(50); - - /* disable mem access */ - gmc_v7_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { - dev_warn(adev->dev, "Wait for MC idle timed out !\n"); - } - if (adev->flags & AMD_IS_APU) kv_save_regs_for_reset(adev, &kv_save); @@ -1489,26 +1204,11 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu */ static int cik_asic_reset(struct amdgpu_device *adev) { - u32 reset_mask; - - reset_mask = amdgpu_cik_gpu_check_soft_reset(adev); - - if (reset_mask) - cik_set_bios_scratch_engine_hung(adev, true); - - /* try soft reset */ - cik_gpu_soft_reset(adev, reset_mask); - - reset_mask = amdgpu_cik_gpu_check_soft_reset(adev); - - /* try pci config reset */ - if (reset_mask && amdgpu_hard_reset) - cik_gpu_pci_config_reset(adev); + cik_set_bios_scratch_engine_hung(adev, true); - reset_mask = amdgpu_cik_gpu_check_soft_reset(adev); + cik_gpu_pci_config_reset(adev); - if (!reset_mask) - cik_set_bios_scratch_engine_hung(adev, false); + cik_set_bios_scratch_engine_hung(adev, false); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 5f712ceddf08..675f34916aab 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -295,30 +295,6 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** - * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring - * - * @ring: amdgpu_ring structure holding ring information - * @semaphore: amdgpu semaphore object - * @emit_wait: wait or signal semaphore - * - * Add a DMA semaphore packet to the ring wait on or signal - * other rings (CIK). - */ -static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - u64 addr = semaphore->gpu_addr; - u32 extra_bits = emit_wait ? 
0 : SDMA_SEMAPHORE_EXTRA_S; - - amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits)); - amdgpu_ring_write(ring, addr & 0xfffffff8); - amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); - - return true; -} - -/** * cik_sdma_gfx_stop - stop the gfx async dma engines * * @adev: amdgpu_device pointer @@ -417,6 +393,9 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) cik_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i], + adev->gfx.config.gb_addr_config & 0x70); + WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0); WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); @@ -584,7 +563,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); - r = amdgpu_ring_lock(ring, 5); + r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); amdgpu_wb_free(adev, index); @@ -595,7 +574,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); amdgpu_ring_write(ring, 1); /* number of DWs to follow */ amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); @@ -645,7 +624,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(ring, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); goto err0; @@ -657,9 +636,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[3] = 1; ib.ptr[4] = 0xDEADBEEF; ib.length_dw = 5; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); if (r) goto err1; @@ -738,7 +716,7 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, * Update PTEs by writing them manually using sDMA (CIK). 
*/ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, - uint64_t pe, + const dma_addr_t *pages_addr, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { @@ -757,14 +735,7 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, ib->ptr[ib->length_dw++] = upper_32_bits(pe); ib->ptr[ib->length_dw++] = ndw; for (; ndw > 0; ndw -= 2, --count, pe += 8) { - if (flags & AMDGPU_PTE_SYSTEM) { - value = amdgpu_vm_map_gart(ib->ring->adev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & AMDGPU_PTE_VALID) { - value = addr; - } else { - value = 0; - } + value = amdgpu_vm_map_gart(pages_addr, addr); addr += incr; value |= flags; ib->ptr[ib->length_dw++] = value; @@ -827,9 +798,9 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, * @ib: indirect buffer to fill with padding * */ -static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib) +static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring); + struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); u32 pad_count; int i; @@ -1097,6 +1068,8 @@ static void cik_sdma_print_status(void *handle) i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n", + i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i])); mutex_lock(&adev->srbm_mutex); for (j = 0; j < 16; j++) { cik_srbm_select(adev, 0, 0, 0, j); @@ -1297,12 +1270,12 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { .parse_cs = NULL, .emit_ib = cik_sdma_ring_emit_ib, .emit_fence = cik_sdma_ring_emit_fence, - .emit_semaphore = cik_sdma_ring_emit_semaphore, .emit_vm_flush = cik_sdma_ring_emit_vm_flush, .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush, .test_ring = cik_sdma_ring_test_ring, .test_ib = cik_sdma_ring_test_ib, .insert_nop = cik_sdma_ring_insert_nop, + .pad_ib = cik_sdma_ring_pad_ib, }; static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev) @@ -1399,14 +1372,18 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = { .copy_pte = cik_sdma_vm_copy_pte, .write_pte = cik_sdma_vm_write_pte, .set_pte_pde = cik_sdma_vm_set_pte_pde, - .pad_ib = cik_sdma_vm_pad_ib, }; static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev) { + unsigned i; + if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; - adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring; - adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; + for (i = 0; i < adev->sdma.num_instances; i++) + adev->vm_manager.vm_pte_rings[i] = + &adev->sdma.instance[i].ring; + + adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; } } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 093599aba64b..e3ff809a0cae 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2670,7 +2670,6 @@ static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc) struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); drm_crtc_cleanup(crtc); - destroy_workqueue(amdgpu_crtc->pflip_queue); kfree(amdgpu_crtc); } @@ -2890,7 +2889,6 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); amdgpu_crtc->crtc_id = index; - amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); 
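/*
 * Editor's note (illustrative, not part of the patch): the DCE hunks in this
 * series (dce_v8_0, dce_v10_0, dce_v11_0) all drop the per-CRTC
 * "amdgpu-pageflip-queue" workqueue.  The page-flip IRQ handlers hand the
 * unpin work to the system workqueue instead, presumably because this
 * short-lived work item does not need a dedicated single-threaded queue:
 *
 *	- queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 *	+ schedule_work(&works->unpin_work);
 *
 * which is why the create_singlethread_workqueue() call above and the
 * destroy_workqueue() calls in the crtc_destroy paths go away.
 */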
adev->mode_info.crtcs[index] = amdgpu_crtc; amdgpu_crtc->max_cursor_width = 128; @@ -3366,7 +3364,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); + schedule_work(&works->unpin_work); return 0; } @@ -3624,16 +3622,8 @@ dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode) } -static bool dce_v10_0_ext_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = { .dpms = dce_v10_0_ext_dpms, - .mode_fixup = dce_v10_0_ext_mode_fixup, .prepare = dce_v10_0_ext_prepare, .mode_set = dce_v10_0_ext_mode_set, .commit = dce_v10_0_ext_commit, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 8e67249d4367..6b6c9b6879ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2661,7 +2661,6 @@ static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); drm_crtc_cleanup(crtc); - destroy_workqueue(amdgpu_crtc->pflip_queue); kfree(amdgpu_crtc); } @@ -2881,7 +2880,6 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); amdgpu_crtc->crtc_id = index; - amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); adev->mode_info.crtcs[index] = amdgpu_crtc; amdgpu_crtc->max_cursor_width = 128; @@ -3361,7 +3359,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); + schedule_work(&works->unpin_work); return 0; } @@ -3619,16 +3617,8 @@ dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode) } -static bool dce_v11_0_ext_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = { .dpms = dce_v11_0_ext_dpms, - .mode_fixup = dce_v11_0_ext_mode_fixup, .prepare = dce_v11_0_ext_prepare, .mode_set = dce_v11_0_ext_mode_set, .commit = dce_v11_0_ext_commit, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index d0e128c24813..56bea36a6b18 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2582,7 +2582,6 @@ static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc) struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); drm_crtc_cleanup(crtc); - destroy_workqueue(amdgpu_crtc->pflip_queue); kfree(amdgpu_crtc); } @@ -2809,7 +2808,6 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); amdgpu_crtc->crtc_id = index; - amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); adev->mode_info.crtcs[index] = amdgpu_crtc; amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH; @@ -3375,7 +3373,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); + 
schedule_work(&works->unpin_work); return 0; } @@ -3554,16 +3552,8 @@ dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode) } -static bool dce_v8_0_ext_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = { .dpms = dce_v8_0_ext_dpms, - .mode_fixup = dce_v8_0_ext_mode_fixup, .prepare = dce_v8_0_ext_prepare, .mode_set = dce_v8_0_ext_mode_set, .commit = dce_v8_0_ext_commit, diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c index e35340afd3db..b336c918d6a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c @@ -272,6 +272,12 @@ static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev) if (!adev->pm.fw) return -EINVAL; + /* Skip SMC ucode loading on SR-IOV capable boards. + * vbios does this for us in asic_init in that case. + */ + if (adev->virtualization.supports_sr_iov) + return 0; + hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; amdgpu_ucode_print_smc_hdr(&hdr->header); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 6c76139de1c9..250bcbce7fdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -31,8 +31,6 @@ #include "amdgpu_ucode.h" #include "clearstate_ci.h" -#include "uvd/uvd_4_2_d.h" - #include "dce/dce_8_0_d.h" #include "dce/dce_8_0_sh_mask.h" @@ -1006,9 +1004,15 @@ out: */ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) { - const u32 num_tile_mode_states = 32; - const u32 num_secondary_tile_mode_states = 16; - u32 reg_offset, gb_tile_moden, split_equal_to_row_size; + const u32 num_tile_mode_states = + ARRAY_SIZE(adev->gfx.config.tile_mode_array); + const u32 num_secondary_tile_mode_states = + ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); + u32 reg_offset, split_equal_to_row_size; + uint32_t *tile, *macrotile; + + tile = adev->gfx.config.tile_mode_array; + macrotile = adev->gfx.config.macrotile_mode_array; switch (adev->gfx.config.mem_row_size_in_kb) { case 1: @@ -1023,832 +1027,531 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) break; } + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + tile[reg_offset] = 0; + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + macrotile[reg_offset] = 0; + switch (adev->asic_type) { case CHIP_BONAIRE: - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 1: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 2: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 3: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 4: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - 
PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 5: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 6: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 7: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - - case 8: - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | - PIPE_CONFIG(ADDR_SURF_P4_16x16)); - break; - case 9: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); - break; - case 10: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 11: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 12: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - case 13: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); - break; - case 14: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 15: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 16: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 17: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - case 18: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 19: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); - break; - case 20: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 21: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 22: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 23: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - case 24: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 25: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | - 
PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 26: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 27: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); - break; - case 28: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 29: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 30: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - default: - gb_tile_moden = 0; - break; - } - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); - } - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 1: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 2: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 3: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 4: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 5: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 6: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_4_BANK)); - break; - case 8: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 9: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 10: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 11: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 12: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - 
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 13: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 14: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_4_BANK)); - break; - default: - gb_tile_moden = 0; - break; - } - adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); - } + tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[7] = (TILE_SPLIT(split_equal_to_row_size)); + tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P4_16x16)); + tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); + tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[12] = (TILE_SPLIT(split_equal_to_row_size)); + tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); + tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[17] = (TILE_SPLIT(split_equal_to_row_size)); + tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[19] = 
(ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); + tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[23] = (TILE_SPLIT(split_equal_to_row_size)); + tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); + tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[30] = (TILE_SPLIT(split_equal_to_row_size)); + + macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + 
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + if (reg_offset != 7) + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); break; case CHIP_HAWAII: - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 1: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 2: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 3: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 4: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 5: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 6: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 7: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - - case 8: - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); - break; - case 9: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); - break; - case 10: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 11: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - 
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 12: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 13: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); - break; - case 14: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 15: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 16: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 17: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 18: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 19: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); - break; - case 20: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 21: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 22: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 23: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 24: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 25: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 26: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 27: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); - break; - case 28: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - 
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 29: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 30: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P4_16x16) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - default: - gb_tile_moden = 0; - break; - } - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); - } - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 1: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 2: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 3: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 4: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 5: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_4_BANK)); - break; - case 6: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_4_BANK)); - break; - case 8: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 9: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 10: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 11: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 12: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 13: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 14: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | - NUM_BANKS(ADDR_SURF_4_BANK)); - break; - default: - gb_tile_moden = 0; - break; - } 
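/*
 * The gfx_v7_0 tiling hunks replace one switch statement per register index
 * with direct initialization of tile_mode_array/macrotile_mode_array,
 * followed by a single loop that programs the hardware (the reserved
 * macrotile index 7 is skipped).  A minimal sketch of the resulting shape,
 * using only a couple of hypothetical table values:
 */
static void tiling_table_sketch(struct amdgpu_device *adev)
{
	u32 *tile = adev->gfx.config.tile_mode_array;
	const u32 n = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	u32 i;

	for (i = 0; i < n; i++)			/* clear unused entries */
		tile[i] = 0;

	tile[0] = ARRAY_MODE(ARRAY_2D_TILED_THIN1) |	/* per-ASIC values */
		  PIPE_CONFIG(ADDR_SURF_P4_16x16);
	tile[8] = ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
		  PIPE_CONFIG(ADDR_SURF_P4_16x16);

	for (i = 0; i < n; i++)			/* one programming pass */
		WREG32(mmGB_TILE_MODE0 + i, tile[i]);
}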
- adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); - } + tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); + tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); + tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); + tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + 
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); + tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); + tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + + macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + 
macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + if (reg_offset != 7) + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); break; case CHIP_KABINI: case CHIP_KAVERI: case CHIP_MULLINS: default: - for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { - switch (reg_offset) { - case 0: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 1: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 2: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 3: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 4: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 5: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); - break; - case 6: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | - TILE_SPLIT(split_equal_to_row_size)); - break; - case 7: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - - case 8: - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | - PIPE_CONFIG(ADDR_SURF_P2)); - break; - case 9: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); - break; - case 10: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 11: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - 
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 12: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - case 13: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); - break; - case 14: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 15: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 16: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 17: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - case 18: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 19: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); - break; - case 20: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 21: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 22: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 23: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - case 24: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 25: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 26: - gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); - break; - case 27: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); - break; - case 28: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); - break; - case 29: - gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | - PIPE_CONFIG(ADDR_SURF_P2) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | - SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); - break; - case 30: - gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); - break; - default: - gb_tile_moden = 0; - break; - } - adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); - } - for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 
{ - switch (reg_offset) { - case 0: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 1: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 2: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 3: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 4: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 5: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 6: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - case 8: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 9: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 10: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 11: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 12: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 13: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | - NUM_BANKS(ADDR_SURF_16_BANK)); - break; - case 14: - gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | - NUM_BANKS(ADDR_SURF_8_BANK)); - break; - default: - gb_tile_moden = 0; - break; - } - adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; - WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); - } + tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + 
PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + tile[7] = (TILE_SPLIT(split_equal_to_row_size)); + tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P2)); + tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); + tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[12] = (TILE_SPLIT(split_equal_to_row_size)); + tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); + tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[17] = (TILE_SPLIT(split_equal_to_row_size)); + tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); + tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[23] = (TILE_SPLIT(split_equal_to_row_size)); + tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); + tile[28] = 
(ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + tile[30] = (TILE_SPLIT(split_equal_to_row_size)); + + macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) + if (reg_offset != 7) + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); break; } } @@ -1893,45 +1596,31 @@ void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) */ static u32 gfx_v7_0_create_bitmask(u32 bit_width) { - u32 i, mask = 0; - - for (i = 0; i < bit_width; i++) { - mask <<= 1; - mask |= 1; - } - return mask; + return (u32)((1ULL << bit_width) - 1); } /** - * gfx_v7_0_get_rb_disabled - computes the mask of 
disabled RBs + * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs * * @adev: amdgpu_device pointer - * @max_rb_num: max RBs (render backends) for the asic - * @se_num: number of SEs (shader engines) for the asic - * @sh_per_se: number of SH blocks per SE for the asic * - * Calculates the bitmask of disabled RBs (CIK). - * Returns the disabled RB bitmask. + * Calculates the bitmask of enabled RBs (CIK). + * Returns the enabled RB bitmask. */ -static u32 gfx_v7_0_get_rb_disabled(struct amdgpu_device *adev, - u32 max_rb_num_per_se, - u32 sh_per_se) +static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; data = RREG32(mmCC_RB_BACKEND_DISABLE); - if (data & 1) - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; - else - data = 0; - data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); + data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; - mask = gfx_v7_0_create_bitmask(max_rb_num_per_se / sh_per_se); + mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se); - return data & mask; + return (~data) & mask; } /** @@ -1940,73 +1629,36 @@ static u32 gfx_v7_0_get_rb_disabled(struct amdgpu_device *adev, * @adev: amdgpu_device pointer * @se_num: number of SEs (shader engines) for the asic * @sh_per_se: number of SH blocks per SE for the asic - * @max_rb_num: max RBs (render backends) for the asic * * Configures per-SE/SH RB registers (CIK). */ -static void gfx_v7_0_setup_rb(struct amdgpu_device *adev, - u32 se_num, u32 sh_per_se, - u32 max_rb_num_per_se) +static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) { int i, j; - u32 data, mask; - u32 disabled_rbs = 0; - u32 enabled_rbs = 0; + u32 data, tmp, num_rbs = 0; + u32 active_rbs = 0; mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - for (j = 0; j < sh_per_se; j++) { + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { gfx_v7_0_select_se_sh(adev, i, j); - data = gfx_v7_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se); + data = gfx_v7_0_get_rb_active_bitmap(adev); if (adev->asic_type == CHIP_HAWAII) - disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); + active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * + HAWAII_RB_BITMAP_WIDTH_PER_SH); else - disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH); + active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * + CIK_RB_BITMAP_WIDTH_PER_SH); } } gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); - mask = 1; - for (i = 0; i < max_rb_num_per_se * se_num; i++) { - if (!(disabled_rbs & mask)) - enabled_rbs |= mask; - mask <<= 1; - } - - adev->gfx.config.backend_enable_mask = enabled_rbs; - - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - gfx_v7_0_select_se_sh(adev, i, 0xffffffff); - data = 0; - for (j = 0; j < sh_per_se; j++) { - switch (enabled_rbs & 3) { - case 0: - if (j == 0) - data |= (RASTER_CONFIG_RB_MAP_3 << - PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); - else - data |= (RASTER_CONFIG_RB_MAP_0 << - PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); - break; - case 1: - data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); - break; - case 2: - data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); - break; - case 3: - default: - data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); - break; - } - enabled_rbs >>= 2; - } - 
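/*
 * The render-backend hunks above rework gfx_v7_0_get_rb_disabled() into
 * gfx_v7_0_get_rb_active_bitmap(): the helper inverts the hardware disable
 * bits and masks the result to the number of backends per shader array,
 * while gfx_v7_0_create_bitmask() collapses its loop into a shift.  A small
 * standalone sketch of that arithmetic, with hypothetical example values
 * noted in the comment:
 */
static u32 rb_active_sketch(u32 disable_bits, u32 bit_width)
{
	/* 1ULL keeps the shift well defined when bit_width == 32 */
	u32 mask = (u32)((1ULL << bit_width) - 1);

	/* e.g. disable_bits = 0x2, bit_width = 4  ->  active mask = 0xd */
	return (~disable_bits) & mask;
}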
WREG32(mmPA_SC_RASTER_CONFIG, data); - } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); + adev->gfx.config.backend_enable_mask = active_rbs; + tmp = active_rbs; + while (tmp >>= 1) + num_rbs++; + adev->gfx.config.num_rbs = num_rbs; } /** @@ -2059,192 +1711,23 @@ static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev) */ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) { - u32 gb_addr_config; - u32 mc_shared_chmap, mc_arb_ramcfg; - u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; - u32 sh_mem_cfg; - u32 tmp; + u32 tmp, sh_mem_cfg; int i; - switch (adev->asic_type) { - case CHIP_BONAIRE: - adev->gfx.config.max_shader_engines = 2; - adev->gfx.config.max_tile_pipes = 4; - adev->gfx.config.max_cu_per_sh = 7; - adev->gfx.config.max_sh_per_se = 1; - adev->gfx.config.max_backends_per_se = 2; - adev->gfx.config.max_texture_channel_caches = 4; - adev->gfx.config.max_gprs = 256; - adev->gfx.config.max_gs_threads = 32; - adev->gfx.config.max_hw_contexts = 8; - - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; - gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; - break; - case CHIP_HAWAII: - adev->gfx.config.max_shader_engines = 4; - adev->gfx.config.max_tile_pipes = 16; - adev->gfx.config.max_cu_per_sh = 11; - adev->gfx.config.max_sh_per_se = 1; - adev->gfx.config.max_backends_per_se = 4; - adev->gfx.config.max_texture_channel_caches = 16; - adev->gfx.config.max_gprs = 256; - adev->gfx.config.max_gs_threads = 32; - adev->gfx.config.max_hw_contexts = 8; - - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; - gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN; - break; - case CHIP_KAVERI: - adev->gfx.config.max_shader_engines = 1; - adev->gfx.config.max_tile_pipes = 4; - if ((adev->pdev->device == 0x1304) || - (adev->pdev->device == 0x1305) || - (adev->pdev->device == 0x130C) || - (adev->pdev->device == 0x130F) || - (adev->pdev->device == 0x1310) || - (adev->pdev->device == 0x1311) || - (adev->pdev->device == 0x131C)) { - adev->gfx.config.max_cu_per_sh = 8; - adev->gfx.config.max_backends_per_se = 2; - } else if ((adev->pdev->device == 0x1309) || - (adev->pdev->device == 0x130A) || - (adev->pdev->device == 0x130D) || - (adev->pdev->device == 0x1313) || - (adev->pdev->device == 0x131D)) { - adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; - } else if ((adev->pdev->device == 0x1306) || - (adev->pdev->device == 0x1307) || - (adev->pdev->device == 0x130B) || - (adev->pdev->device == 0x130E) || - (adev->pdev->device == 0x1315) || - (adev->pdev->device == 0x131B)) { - adev->gfx.config.max_cu_per_sh = 4; - adev->gfx.config.max_backends_per_se = 1; - } else { - adev->gfx.config.max_cu_per_sh = 3; - adev->gfx.config.max_backends_per_se = 1; - } - adev->gfx.config.max_sh_per_se = 1; - adev->gfx.config.max_texture_channel_caches = 4; - adev->gfx.config.max_gprs = 256; - adev->gfx.config.max_gs_threads = 16; - adev->gfx.config.max_hw_contexts = 8; - - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; - gb_addr_config = 
BONAIRE_GB_ADDR_CONFIG_GOLDEN; - break; - case CHIP_KABINI: - case CHIP_MULLINS: - default: - adev->gfx.config.max_shader_engines = 1; - adev->gfx.config.max_tile_pipes = 2; - adev->gfx.config.max_cu_per_sh = 2; - adev->gfx.config.max_sh_per_se = 1; - adev->gfx.config.max_backends_per_se = 1; - adev->gfx.config.max_texture_channel_caches = 2; - adev->gfx.config.max_gprs = 256; - adev->gfx.config.max_gs_threads = 16; - adev->gfx.config.max_hw_contexts = 8; - - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; - gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; - break; - } - WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT)); - mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); - adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); - mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; - - adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; - adev->gfx.config.mem_max_burst_length_bytes = 256; - if (adev->flags & AMD_IS_APU) { - /* Get memory bank mapping mode. */ - tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); - dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); - dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP); - - tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING); - dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP); - dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP); - - /* Validate settings in case only one DIMM installed. */ - if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12)) - dimm00_addr_map = 0; - if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12)) - dimm01_addr_map = 0; - if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12)) - dimm10_addr_map = 0; - if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12)) - dimm11_addr_map = 0; - - /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */ - /* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */ - if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11)) - adev->gfx.config.mem_row_size_in_kb = 2; - else - adev->gfx.config.mem_row_size_in_kb = 1; - } else { - tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT; - adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; - if (adev->gfx.config.mem_row_size_in_kb > 4) - adev->gfx.config.mem_row_size_in_kb = 4; - } - /* XXX use MC settings? 
*/ - adev->gfx.config.shader_engine_tile_size = 32; - adev->gfx.config.num_gpus = 1; - adev->gfx.config.multi_gpu_tile_size = 64; - - /* fix up row size */ - gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK; - switch (adev->gfx.config.mem_row_size_in_kb) { - case 1: - default: - gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); - break; - case 2: - gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); - break; - case 4: - gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); - break; - } - adev->gfx.config.gb_addr_config = gb_addr_config; - - WREG32(mmGB_ADDR_CONFIG, gb_addr_config); - WREG32(mmHDP_ADDR_CONFIG, gb_addr_config); - WREG32(mmDMIF_ADDR_CALC, gb_addr_config); - WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70); - WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70); - WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config); - WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); - WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); + WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config); gfx_v7_0_tiling_mode_table_init(adev); - gfx_v7_0_setup_rb(adev, adev->gfx.config.max_shader_engines, - adev->gfx.config.max_sh_per_se, - adev->gfx.config.max_backends_per_se); + gfx_v7_0_setup_rb(adev); /* set HW defaults for 3D engine */ WREG32(mmCP_MEQ_THRESHOLDS, - (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) | - (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT)); + (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) | + (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT)); mutex_lock(&adev->grbm_idx_mutex); /* @@ -2255,7 +1738,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ - sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, + sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, SH_MEM_ALIGNMENT_MODE_UNALIGNED); mutex_lock(&adev->srbm_mutex); @@ -2379,7 +1862,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) return r; } WREG32(scratch, 0xCAFEDEAD); - r = amdgpu_ring_lock(ring, 3); + r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); amdgpu_gfx_scratch_free(adev, scratch); @@ -2388,7 +1871,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(scratch); @@ -2516,36 +1999,6 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring, amdgpu_ring_write(ring, upper_32_bits(seq)); } -/** - * gfx_v7_0_ring_emit_semaphore - emit a semaphore on the CP ring - * - * @ring: amdgpu ring buffer object - * @semaphore: amdgpu semaphore object - * @emit_wait: Is this a sempahore wait? - * - * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP - * from running ahead of semaphore waits. - */ -static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - unsigned sel = emit_wait ? 
PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; - - amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); - amdgpu_ring_write(ring, addr & 0xffffffff); - amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); - - if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) { - /* Prevent the PFP from running ahead of the semaphore wait */ - amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); - amdgpu_ring_write(ring, 0x0); - } - - return true; -} - /* * IB stuff */ @@ -2661,7 +2114,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) } WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(ring, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); goto err1; @@ -2671,9 +2124,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); if (r) goto err2; @@ -2842,7 +2294,7 @@ static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev) gfx_v7_0_cp_gfx_enable(adev, true); - r = amdgpu_ring_lock(ring, gfx_v7_0_get_csb_size(adev) + 8); + r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); return r; @@ -2911,7 +2363,7 @@ static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); return 0; } @@ -2989,21 +2441,14 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev) static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) { - u32 rptr; - - rptr = ring->adev->wb.wb[ring->rptr_offs]; - - return rptr; + return ring->adev->wb.wb[ring->rptr_offs]; } static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - u32 wptr; - wptr = RREG32(mmCP_RB0_WPTR); - - return wptr; + return RREG32(mmCP_RB0_WPTR); } static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) @@ -3016,21 +2461,13 @@ static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring) { - u32 rptr; - - rptr = ring->adev->wb.wb[ring->rptr_offs]; - - return rptr; + return ring->adev->wb.wb[ring->rptr_offs]; } static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring) { - u32 wptr; - /* XXX check if swapping is necessary on BE */ - wptr = ring->adev->wb.wb[ring->wptr_offs]; - - return wptr; + return ring->adev->wb.wb[ring->wptr_offs]; } static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring) @@ -3126,21 +2563,6 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) } /** - * gfx_v7_0_cp_compute_start - start the compute queues - * - * @adev: amdgpu_device pointer - * - * Enable the compute queues. - * Returns 0 for success, error for failure. 
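
A condensed sketch of the scratch-register ring test that these hunks keep converting: the only functional change is the rename of amdgpu_ring_lock()/amdgpu_ring_unlock_commit() to amdgpu_ring_alloc()/amdgpu_ring_commit(); the packet sequence and the polling loop are as in the hunks above, with error handling, scratch cleanup and the exact delay call left out of (or assumed in) the sketch.

	WREG32(scratch, 0xCAFEDEAD);			/* seed the scratch register */
	r = amdgpu_ring_alloc(ring, 3);			/* was amdgpu_ring_lock() */
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, scratch - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);		/* value the CP should write back */
	amdgpu_ring_commit(ring);			/* was amdgpu_ring_unlock_commit() */

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(scratch) == 0xDEADBEEF)	/* test passes once the write lands */
			break;
		udelay(1);				/* assumed delay; not shown in the hunk */
	}
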
- */ -static int gfx_v7_0_cp_compute_start(struct amdgpu_device *adev) -{ - gfx_v7_0_cp_compute_enable(adev, true); - - return 0; -} - -/** * gfx_v7_0_cp_compute_fini - stop the compute queues * * @adev: amdgpu_device pointer @@ -3330,9 +2752,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) u32 *buf; struct bonaire_mqd *mqd; - r = gfx_v7_0_cp_compute_start(adev); - if (r) - return r; + gfx_v7_0_cp_compute_enable(adev, true); /* fix up chicken bits */ tmp = RREG32(mmCP_CPF_DEBUG); @@ -4395,28 +3815,20 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, } } -static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev, - u32 se, u32 sh) +static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev) { - u32 mask = 0, tmp, tmp1; - int i; - - gfx_v7_0_select_se_sh(adev, se, sh); - tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); - tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + u32 data, mask; - tmp &= 0xffff0000; + data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); + data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - tmp |= tmp1; - tmp >>= 16; + data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; + data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; - for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) { - mask <<= 1; - mask |= 1; - } + mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se); - return (~tmp) & mask; + return (~data) & mask; } static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev) @@ -4754,6 +4166,172 @@ static int gfx_v7_0_late_init(void *handle) return 0; } +static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) +{ + u32 gb_addr_config; + u32 mc_shared_chmap, mc_arb_ramcfg; + u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; + u32 tmp; + + switch (adev->asic_type) { + case CHIP_BONAIRE: + adev->gfx.config.max_shader_engines = 2; + adev->gfx.config.max_tile_pipes = 4; + adev->gfx.config.max_cu_per_sh = 7; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 2; + adev->gfx.config.max_texture_channel_caches = 4; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_HAWAII: + adev->gfx.config.max_shader_engines = 4; + adev->gfx.config.max_tile_pipes = 16; + adev->gfx.config.max_cu_per_sh = 11; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 4; + adev->gfx.config.max_texture_channel_caches = 16; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_KAVERI: + adev->gfx.config.max_shader_engines = 1; + adev->gfx.config.max_tile_pipes = 4; + if ((adev->pdev->device == 0x1304) || + (adev->pdev->device == 0x1305) || + (adev->pdev->device == 0x130C) || + (adev->pdev->device == 0x130F) || + (adev->pdev->device == 0x1310) || + (adev->pdev->device == 0x1311) || + 
(adev->pdev->device == 0x131C)) { + adev->gfx.config.max_cu_per_sh = 8; + adev->gfx.config.max_backends_per_se = 2; + } else if ((adev->pdev->device == 0x1309) || + (adev->pdev->device == 0x130A) || + (adev->pdev->device == 0x130D) || + (adev->pdev->device == 0x1313) || + (adev->pdev->device == 0x131D)) { + adev->gfx.config.max_cu_per_sh = 6; + adev->gfx.config.max_backends_per_se = 2; + } else if ((adev->pdev->device == 0x1306) || + (adev->pdev->device == 0x1307) || + (adev->pdev->device == 0x130B) || + (adev->pdev->device == 0x130E) || + (adev->pdev->device == 0x1315) || + (adev->pdev->device == 0x131B)) { + adev->gfx.config.max_cu_per_sh = 4; + adev->gfx.config.max_backends_per_se = 1; + } else { + adev->gfx.config.max_cu_per_sh = 3; + adev->gfx.config.max_backends_per_se = 1; + } + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_texture_channel_caches = 4; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 16; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_KABINI: + case CHIP_MULLINS: + default: + adev->gfx.config.max_shader_engines = 1; + adev->gfx.config.max_tile_pipes = 2; + adev->gfx.config.max_cu_per_sh = 2; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 1; + adev->gfx.config.max_texture_channel_caches = 2; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 16; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; + break; + } + + mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); + adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); + mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; + + adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; + adev->gfx.config.mem_max_burst_length_bytes = 256; + if (adev->flags & AMD_IS_APU) { + /* Get memory bank mapping mode. */ + tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); + dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); + dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP); + + tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING); + dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP); + dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP); + + /* Validate settings in case only one DIMM installed. */ + if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12)) + dimm00_addr_map = 0; + if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12)) + dimm01_addr_map = 0; + if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12)) + dimm10_addr_map = 0; + if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12)) + dimm11_addr_map = 0; + + /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */ + /* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. 
*/ + if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11)) + adev->gfx.config.mem_row_size_in_kb = 2; + else + adev->gfx.config.mem_row_size_in_kb = 1; + } else { + tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT; + adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; + if (adev->gfx.config.mem_row_size_in_kb > 4) + adev->gfx.config.mem_row_size_in_kb = 4; + } + /* XXX use MC settings? */ + adev->gfx.config.shader_engine_tile_size = 32; + adev->gfx.config.num_gpus = 1; + adev->gfx.config.multi_gpu_tile_size = 64; + + /* fix up row size */ + gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK; + switch (adev->gfx.config.mem_row_size_in_kb) { + case 1: + default: + gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); + break; + case 2: + gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); + break; + case 4: + gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); + break; + } + adev->gfx.config.gb_addr_config = gb_addr_config; +} + static int gfx_v7_0_sw_init(void *handle) { struct amdgpu_ring *ring; @@ -4857,6 +4435,10 @@ static int gfx_v7_0_sw_init(void *handle) if (r) return r; + adev->gfx.ce_ram_size = 0x8000; + + gfx_v7_0_gpu_early_init(adev); + return r; } @@ -4897,8 +4479,6 @@ static int gfx_v7_0_hw_init(void *handle) if (r) return r; - adev->gfx.ce_ram_size = 0x8000; - return r; } @@ -5015,16 +4595,6 @@ static void gfx_v7_0_print_status(void *handle) RREG32(mmHDP_ADDR_CONFIG)); dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n", RREG32(mmDMIF_ADDR_CALC)); - dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n", - RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET)); - dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n", - RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET)); - dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n", RREG32(mmCP_MEQ_THRESHOLDS)); @@ -5567,13 +5137,13 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { .parse_cs = NULL, .emit_ib = gfx_v7_0_ring_emit_ib_gfx, .emit_fence = gfx_v7_0_ring_emit_fence_gfx, - .emit_semaphore = gfx_v7_0_ring_emit_semaphore, .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch, .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush, .test_ring = gfx_v7_0_ring_test_ring, .test_ib = gfx_v7_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { @@ -5583,13 +5153,13 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { .parse_cs = NULL, .emit_ib = gfx_v7_0_ring_emit_ib_compute, .emit_fence = gfx_v7_0_ring_emit_fence_compute, - .emit_semaphore = gfx_v7_0_ring_emit_semaphore, .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch, .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush, .test_ring = gfx_v7_0_ring_test_ring, .test_ib = gfx_v7_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev) @@ -5659,7 +5229,7 @@ static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev) int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, 
- struct amdgpu_cu_info *cu_info) + struct amdgpu_cu_info *cu_info) { int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; @@ -5673,10 +5243,11 @@ int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, mask = 1; ao_bitmap = 0; counter = 0; - bitmap = gfx_v7_0_get_cu_active_bitmap(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j); + bitmap = gfx_v7_0_get_cu_active_bitmap(adev); cu_info->bitmap[i][j] = bitmap; - for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { + for (k = 0; k < 16; k ++) { if (bitmap & mask) { if (counter < 2) ao_bitmap |= mask; @@ -5688,9 +5259,11 @@ int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); } } + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; cu_info->ao_cu_mask = ao_cu_mask; - mutex_unlock(&adev->grbm_idx_mutex); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 8f8ec37ecd88..10c865087d0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -43,9 +43,6 @@ #include "gca/gfx_8_0_sh_mask.h" #include "gca/gfx_8_0_enum.h" -#include "uvd/uvd_5_0_d.h" -#include "uvd/uvd_5_0_sh_mask.h" - #include "dce/dce_10_0_d.h" #include "dce/dce_10_0_sh_mask.h" @@ -652,7 +649,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) return r; } WREG32(scratch, 0xCAFEDEAD); - r = amdgpu_ring_lock(ring, 3); + r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); @@ -662,7 +659,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(scratch); @@ -699,7 +696,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) } WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(ring, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); goto err1; @@ -709,9 +706,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); if (r) goto err2; @@ -1171,7 +1167,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* allocate an indirect buffer to put the commands in */ memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(ring, NULL, total_size, &ib); + r = amdgpu_ib_get(adev, NULL, total_size, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); return r; @@ -1266,9 +1262,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); /* shedule the ib on the ring */ - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); if (r) { DRM_ERROR("amdgpu: ib submit failed (%d).\n", r); goto fail; @@ -2574,11 +2569,6 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) } } -static u32 
gfx_v8_0_create_bitmask(u32 bit_width) -{ - return (u32)((1ULL << bit_width) - 1); -} - void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) { u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); @@ -2599,89 +2589,50 @@ void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) WREG32(mmGRBM_GFX_INDEX, data); } -static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev, - u32 max_rb_num_per_se, - u32 sh_per_se) +static u32 gfx_v8_0_create_bitmask(u32 bit_width) +{ + return (u32)((1ULL << bit_width) - 1); +} + +static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; data = RREG32(mmCC_RB_BACKEND_DISABLE); - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; - data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); + data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; - mask = gfx_v8_0_create_bitmask(max_rb_num_per_se / sh_per_se); + mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se); - return data & mask; + return (~data) & mask; } -static void gfx_v8_0_setup_rb(struct amdgpu_device *adev, - u32 se_num, u32 sh_per_se, - u32 max_rb_num_per_se) +static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) { int i, j; - u32 data, mask; - u32 disabled_rbs = 0; - u32 enabled_rbs = 0; + u32 data, tmp, num_rbs = 0; + u32 active_rbs = 0; mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - for (j = 0; j < sh_per_se; j++) { + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { gfx_v8_0_select_se_sh(adev, i, j); - data = gfx_v8_0_get_rb_disabled(adev, - max_rb_num_per_se, sh_per_se); - disabled_rbs |= data << ((i * sh_per_se + j) * - RB_BITMAP_WIDTH_PER_SH); + data = gfx_v8_0_get_rb_active_bitmap(adev); + active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * + RB_BITMAP_WIDTH_PER_SH); } } gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); - mask = 1; - for (i = 0; i < max_rb_num_per_se * se_num; i++) { - if (!(disabled_rbs & mask)) - enabled_rbs |= mask; - mask <<= 1; - } - - adev->gfx.config.backend_enable_mask = enabled_rbs; - - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - gfx_v8_0_select_se_sh(adev, i, 0xffffffff); - data = RREG32(mmPA_SC_RASTER_CONFIG); - for (j = 0; j < sh_per_se; j++) { - switch (enabled_rbs & 3) { - case 0: - if (j == 0) - data |= (RASTER_CONFIG_RB_MAP_3 << - PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); - else - data |= (RASTER_CONFIG_RB_MAP_0 << - PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); - break; - case 1: - data |= (RASTER_CONFIG_RB_MAP_0 << - (i * sh_per_se + j) * 2); - break; - case 2: - data |= (RASTER_CONFIG_RB_MAP_3 << - (i * sh_per_se + j) * 2); - break; - case 3: - default: - data |= (RASTER_CONFIG_RB_MAP_2 << - (i * sh_per_se + j) * 2); - break; - } - enabled_rbs >>= 2; - } - WREG32(mmPA_SC_RASTER_CONFIG, data); - } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); + adev->gfx.config.backend_enable_mask = active_rbs; + tmp = active_rbs; + while (tmp >>= 1) + num_rbs++; + adev->gfx.config.num_rbs = num_rbs; } /** @@ -2741,19 +2692,10 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config); - 
WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, - adev->gfx.config.gb_addr_config & 0x70); - WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, - adev->gfx.config.gb_addr_config & 0x70); - WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); - WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); - WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); gfx_v8_0_tiling_mode_table_init(adev); - gfx_v8_0_setup_rb(adev, adev->gfx.config.max_shader_engines, - adev->gfx.config.max_sh_per_se, - adev->gfx.config.max_backends_per_se); + gfx_v8_0_setup_rb(adev); /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ @@ -3062,7 +3004,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) gfx_v8_0_cp_gfx_enable(adev, true); - r = amdgpu_ring_lock(ring, gfx_v8_0_get_csb_size(adev) + 4); + r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); return r; @@ -3126,7 +3068,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, 0x8000); amdgpu_ring_write(ring, 0x8000); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); return 0; } @@ -3226,13 +3168,6 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) udelay(50); } -static int gfx_v8_0_cp_compute_start(struct amdgpu_device *adev) -{ - gfx_v8_0_cp_compute_enable(adev, true); - - return 0; -} - static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) { const struct gfx_firmware_header_v1_0 *mec_hdr; @@ -3802,9 +3737,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) WREG32(mmCP_PQ_STATUS, tmp); } - r = gfx_v8_0_cp_compute_start(adev); - if (r) - return r; + gfx_v8_0_cp_compute_enable(adev, true); for (i = 0; i < adev->gfx.num_compute_rings; i++) { struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; @@ -4016,16 +3949,6 @@ static void gfx_v8_0_print_status(void *handle) RREG32(mmHDP_ADDR_CONFIG)); dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n", RREG32(mmDMIF_ADDR_CALC)); - dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n", - RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET)); - dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n", - RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET)); - dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); - dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", - RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n", RREG32(mmCP_MEQ_THRESHOLDS)); @@ -4762,49 +4685,11 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, } -/** - * gfx_v8_0_ring_emit_semaphore - emit a semaphore on the CP ring - * - * @ring: amdgpu ring buffer object - * @semaphore: amdgpu semaphore object - * @emit_wait: Is this a sempahore wait? - * - * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP - * from running ahead of semaphore waits. - */ -static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - unsigned sel = emit_wait ? 
PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; - - if (ring->adev->asic_type == CHIP_TOPAZ || - ring->adev->asic_type == CHIP_TONGA || - ring->adev->asic_type == CHIP_FIJI) - /* we got a hw semaphore bug in VI TONGA, return false to switch back to sw fence wait */ - return false; - else { - amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 2)); - amdgpu_ring_write(ring, lower_32_bits(addr)); - amdgpu_ring_write(ring, upper_32_bits(addr)); - amdgpu_ring_write(ring, sel); - } - - if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) { - /* Prevent the PFP from running ahead of the semaphore wait */ - amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); - amdgpu_ring_write(ring, 0x0); - } - - return true; -} - static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr) { int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); - uint32_t seq = ring->fence_drv.sync_seq[ring->idx]; + uint32_t seq = ring->fence_drv.sync_seq; uint64_t addr = ring->fence_drv.gpu_addr; amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); @@ -5145,13 +5030,13 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { .parse_cs = NULL, .emit_ib = gfx_v8_0_ring_emit_ib_gfx, .emit_fence = gfx_v8_0_ring_emit_fence_gfx, - .emit_semaphore = gfx_v8_0_ring_emit_semaphore, .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, .test_ring = gfx_v8_0_ring_test_ring, .test_ib = gfx_v8_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { @@ -5161,13 +5046,13 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .parse_cs = NULL, .emit_ib = gfx_v8_0_ring_emit_ib_compute, .emit_fence = gfx_v8_0_ring_emit_fence_compute, - .emit_semaphore = gfx_v8_0_ring_emit_semaphore, .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, .test_ring = gfx_v8_0_ring_test_ring, .test_ib = gfx_v8_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) @@ -5236,32 +5121,24 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev) } } -static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev, - u32 se, u32 sh) +static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev) { - u32 mask = 0, tmp, tmp1; - int i; - - gfx_v8_0_select_se_sh(adev, se, sh); - tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); - tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + u32 data, mask; - tmp &= 0xffff0000; + data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); + data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - tmp |= tmp1; - tmp >>= 16; + data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; + data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; - for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) { - mask <<= 1; - mask |= 1; - } + mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se); - return (~tmp) & mask; + return (~data) & mask; } int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, - struct amdgpu_cu_info *cu_info) + struct amdgpu_cu_info *cu_info) { int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; @@ -5275,10 +5152,11 @@ int 
gfx_v8_0_get_cu_info(struct amdgpu_device *adev, mask = 1; ao_bitmap = 0; counter = 0; - bitmap = gfx_v8_0_get_cu_active_bitmap(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j); + bitmap = gfx_v8_0_get_cu_active_bitmap(adev); cu_info->bitmap[i][j] = bitmap; - for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { + for (k = 0; k < 16; k ++) { if (bitmap & mask) { if (counter < 2) ao_bitmap |= mask; @@ -5290,9 +5168,11 @@ int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); } } + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; cu_info->ao_cu_mask = ao_cu_mask; - mutex_unlock(&adev->grbm_idx_mutex); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 8aa2991ab379..68ee66b38e5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -694,7 +694,8 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev) * amdgpu graphics/compute will use VMIDs 1-7 * amdkfd will use VMIDs 8-15 */ - adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS; + amdgpu_vm_manager_init(adev); /* base offset of vram pages */ if (adev->flags & AMD_IS_APU) { @@ -926,10 +927,6 @@ static int gmc_v7_0_sw_init(void *handle) int dma_bits; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_gem_init(adev); - if (r) - return r; - r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); if (r) return r; @@ -1010,7 +1007,7 @@ static int gmc_v7_0_sw_fini(void *handle) adev->vm_manager.enabled = false; } gmc_v7_0_gart_fini(adev); - amdgpu_gem_fini(adev); + amdgpu_gem_force_release(adev); amdgpu_bo_fini(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 3efd45546241..757803ae7c4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -252,6 +252,12 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) if (!adev->mc.fw) return -EINVAL; + /* Skip MC ucode loading on SR-IOV capable boards. + * vbios does this for us in asic_init in that case. 
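
The same early-return guard appears three times in this series: for the MC ucode here in gmc_v8_0.c and for the SMC ucode in iceland_smc.c and tonga_smc.c further down (those check adev->pm.fw and the smc firmware header instead). Pulled together, the pattern added at the top of each upload function is simply:

	if (!adev->mc.fw)
		return -EINVAL;

	/* On SR-IOV capable boards the vbios has already loaded this ucode
	 * during asic_init, so the driver-side upload is skipped entirely.
	 */
	if (adev->virtualization.supports_sr_iov)
		return 0;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
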
+ */ + if (adev->virtualization.supports_sr_iov) + return 0; + hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; amdgpu_ucode_print_mc_hdr(&hdr->header); @@ -774,7 +780,8 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev) * amdgpu graphics/compute will use VMIDs 1-7 * amdkfd will use VMIDs 8-15 */ - adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS; + amdgpu_vm_manager_init(adev); /* base offset of vram pages */ if (adev->flags & AMD_IS_APU) { @@ -880,10 +887,6 @@ static int gmc_v8_0_sw_init(void *handle) int dma_bits; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_gem_init(adev); - if (r) - return r; - r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); if (r) return r; @@ -964,7 +967,7 @@ static int gmc_v8_0_sw_fini(void *handle) adev->vm_manager.enabled = false; } gmc_v8_0_gart_fini(adev); - amdgpu_gem_fini(adev); + amdgpu_gem_force_release(adev); amdgpu_bo_fini(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index 090486c18249..52ee08193295 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -279,6 +279,12 @@ static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev) if (!adev->pm.fw) return -EINVAL; + /* Skip SMC ucode loading on SR-IOV capable boards. + * vbios does this for us in asic_init in that case. + */ + if (adev->virtualization.supports_sr_iov) + return 0; + hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; amdgpu_ucode_print_smc_hdr(&hdr->header); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 2cf50180cc51..29ec986dd6fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -335,31 +335,6 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se } /** - * sdma_v2_4_ring_emit_semaphore - emit a semaphore on the dma ring - * - * @ring: amdgpu_ring structure holding ring information - * @semaphore: amdgpu semaphore object - * @emit_wait: wait or signal semaphore - * - * Add a DMA semaphore packet to the ring wait on or signal - * other rings (VI). - */ -static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - u64 addr = semaphore->gpu_addr; - u32 sig = emit_wait ? 
0 : 1; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) | - SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig)); - amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8); - amdgpu_ring_write(ring, upper_32_bits(addr)); - - return true; -} - -/** * sdma_v2_4_gfx_stop - stop the gfx async dma engines * * @adev: amdgpu_device pointer @@ -459,6 +434,9 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i], + adev->gfx.config.gb_addr_config & 0x70); + WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); /* Set ring buffer size in dwords */ @@ -636,7 +614,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); - r = amdgpu_ring_lock(ring, 5); + r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); amdgpu_wb_free(adev, index); @@ -649,7 +627,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); @@ -699,7 +677,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(ring, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); goto err0; @@ -716,9 +694,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); if (r) goto err1; @@ -797,7 +774,7 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, * Update PTEs by writing them manually using sDMA (CIK). 
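
Both SDMA PTE-write helpers (v2.4 here, v3.0 further down) lose the AMDGPU_PTE_SYSTEM / AMDGPU_PTE_VALID branching and instead resolve every address through the pages_addr table that is now passed in. Condensed from the hunk below, the inner loop becomes roughly the following; the second dword written per entry is assumed from the two-dword stride (ndw -= 2), since it falls outside the visible context:

	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
		/* amdgpu_vm_map_gart() now takes the caller's pages_addr table
		 * rather than the device pointer and folds in the old
		 * SYSTEM/VALID handling.
		 */
		value = amdgpu_vm_map_gart(pages_addr, addr);
		addr += incr;
		value |= flags;
		ib->ptr[ib->length_dw++] = value;			/* low 32 bits of the PTE */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);	/* assumed second dword */
	}
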
*/ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, - uint64_t pe, + const dma_addr_t *pages_addr, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { @@ -816,14 +793,7 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, ib->ptr[ib->length_dw++] = upper_32_bits(pe); ib->ptr[ib->length_dw++] = ndw; for (; ndw > 0; ndw -= 2, --count, pe += 8) { - if (flags & AMDGPU_PTE_SYSTEM) { - value = amdgpu_vm_map_gart(ib->ring->adev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & AMDGPU_PTE_VALID) { - value = addr; - } else { - value = 0; - } + value = amdgpu_vm_map_gart(pages_addr, addr); addr += incr; value |= flags; ib->ptr[ib->length_dw++] = value; @@ -881,14 +851,14 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, } /** - * sdma_v2_4_vm_pad_ib - pad the IB to the required number of dw + * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw * * @ib: indirect buffer to fill with padding * */ -static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib) +static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring); + struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); u32 pad_count; int i; @@ -1111,6 +1081,8 @@ static void sdma_v2_4_print_status(void *handle) i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n", + i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i])); mutex_lock(&adev->srbm_mutex); for (j = 0; j < 16; j++) { vi_srbm_select(adev, 0, 0, 0, j); @@ -1302,12 +1274,12 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { .parse_cs = NULL, .emit_ib = sdma_v2_4_ring_emit_ib, .emit_fence = sdma_v2_4_ring_emit_fence, - .emit_semaphore = sdma_v2_4_ring_emit_semaphore, .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush, .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush, .test_ring = sdma_v2_4_ring_test_ring, .test_ib = sdma_v2_4_ring_test_ib, .insert_nop = sdma_v2_4_ring_insert_nop, + .pad_ib = sdma_v2_4_ring_pad_ib, }; static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) @@ -1405,14 +1377,18 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = { .copy_pte = sdma_v2_4_vm_copy_pte, .write_pte = sdma_v2_4_vm_write_pte, .set_pte_pde = sdma_v2_4_vm_set_pte_pde, - .pad_ib = sdma_v2_4_vm_pad_ib, }; static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) { + unsigned i; + if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; - adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring; - adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; + for (i = 0; i < adev->sdma.num_instances; i++) + adev->vm_manager.vm_pte_rings[i] = + &adev->sdma.instance[i].ring; + + adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; } } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index ad54c46751b0..6f064d7076e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -444,32 +444,6 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); } - -/** - * sdma_v3_0_ring_emit_semaphore - emit a semaphore on the dma ring - * - * @ring: amdgpu_ring structure holding ring information - * @semaphore: 
amdgpu semaphore object - * @emit_wait: wait or signal semaphore - * - * Add a DMA semaphore packet to the ring wait on or signal - * other rings (VI). - */ -static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - u64 addr = semaphore->gpu_addr; - u32 sig = emit_wait ? 0 : 1; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) | - SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig)); - amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8); - amdgpu_ring_write(ring, upper_32_bits(addr)); - - return true; -} - /** * sdma_v3_0_gfx_stop - stop the gfx async dma engines * @@ -596,6 +570,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i], + adev->gfx.config.gb_addr_config & 0x70); + WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); /* Set ring buffer size in dwords */ @@ -788,7 +765,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); - r = amdgpu_ring_lock(ring, 5); + r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); amdgpu_wb_free(adev, index); @@ -801,7 +778,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); @@ -851,7 +828,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(ring, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); goto err0; @@ -868,9 +845,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, - AMDGPU_FENCE_OWNER_UNDEFINED, - &f); + r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, + NULL, &f); if (r) goto err1; @@ -948,7 +924,7 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib, * Update PTEs by writing them manually using sDMA (CIK). 
*/ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, - uint64_t pe, + const dma_addr_t *pages_addr, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { @@ -967,14 +943,7 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, ib->ptr[ib->length_dw++] = upper_32_bits(pe); ib->ptr[ib->length_dw++] = ndw; for (; ndw > 0; ndw -= 2, --count, pe += 8) { - if (flags & AMDGPU_PTE_SYSTEM) { - value = amdgpu_vm_map_gart(ib->ring->adev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & AMDGPU_PTE_VALID) { - value = addr; - } else { - value = 0; - } + value = amdgpu_vm_map_gart(pages_addr, addr); addr += incr; value |= flags; ib->ptr[ib->length_dw++] = value; @@ -1032,14 +1001,14 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, } /** - * sdma_v3_0_vm_pad_ib - pad the IB to the required number of dw + * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw * * @ib: indirect buffer to fill with padding * */ -static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib) +static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring); + struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); u32 pad_count; int i; @@ -1275,6 +1244,8 @@ static void sdma_v3_0_print_status(void *handle) i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_DOORBELL=0x%08X\n", i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n", + i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i])); mutex_lock(&adev->srbm_mutex); for (j = 0; j < 16; j++) { vi_srbm_select(adev, 0, 0, 0, j); @@ -1570,12 +1541,12 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { .parse_cs = NULL, .emit_ib = sdma_v3_0_ring_emit_ib, .emit_fence = sdma_v3_0_ring_emit_fence, - .emit_semaphore = sdma_v3_0_ring_emit_semaphore, .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush, .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush, .test_ring = sdma_v3_0_ring_test_ring, .test_ib = sdma_v3_0_ring_test_ib, .insert_nop = sdma_v3_0_ring_insert_nop, + .pad_ib = sdma_v3_0_ring_pad_ib, }; static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) @@ -1673,14 +1644,18 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = { .copy_pte = sdma_v3_0_vm_copy_pte, .write_pte = sdma_v3_0_vm_write_pte, .set_pte_pde = sdma_v3_0_vm_set_pte_pde, - .pad_ib = sdma_v3_0_vm_pad_ib, }; static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) { + unsigned i; + if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; - adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring; - adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; + for (i = 0; i < adev->sdma.num_instances; i++) + adev->vm_manager.vm_pte_rings[i] = + &adev->sdma.instance[i].ring; + + adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; } } diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c index 361c49a82323..083893dd68c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c @@ -272,6 +272,12 @@ static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev) if (!adev->pm.fw) return -EINVAL; + /* Skip SMC ucode loading on SR-IOV capable boards. + * vbios does this for us in asic_init in that case. 
+ */ + if (adev->virtualization.supports_sr_iov) + return 0; + hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; amdgpu_ucode_print_smc_hdr(&hdr->header); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 5e9f73af83a8..70ed73fa5156 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -164,7 +164,7 @@ static int uvd_v4_2_hw_init(void *handle) goto done; } - r = amdgpu_ring_lock(ring, 10); + r = amdgpu_ring_alloc(ring, 10); if (r) { DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); goto done; @@ -189,7 +189,7 @@ static int uvd_v4_2_hw_init(void *handle) amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); amdgpu_ring_write(ring, 3); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); done: /* lower clocks again */ @@ -439,33 +439,6 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** - * uvd_v4_2_ring_emit_semaphore - emit semaphore command - * - * @ring: amdgpu_ring pointer - * @semaphore: semaphore to emit commands for - * @emit_wait: true if we should emit a wait command - * - * Emit a semaphore command (either wait or signal) to the UVD ring. - */ -static bool uvd_v4_2_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0)); - amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0)); - amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0)); - amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0)); - - return true; -} - -/** * uvd_v4_2_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -480,7 +453,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) int r; WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); - r = amdgpu_ring_lock(ring, 3); + r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); @@ -488,7 +461,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) } amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmUVD_CONTEXT_ID); if (tmp == 0xDEADBEEF) @@ -549,7 +522,7 @@ static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); if (r) { DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); goto error; @@ -603,6 +576,10 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) addr = (adev->uvd.gpu_addr >> 32) & 0xFF; WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); + WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + uvd_v4_2_init_cg(adev); } @@ -804,6 +781,13 @@ static void uvd_v4_2_print_status(void *handle) RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", RREG32(mmUVD_CONTEXT_ID)); + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); + 
dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); + } static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev, @@ -882,10 +866,10 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v4_2_ring_emit_ib, .emit_fence = uvd_v4_2_ring_emit_fence, - .emit_semaphore = uvd_v4_2_ring_emit_semaphore, .test_ring = uvd_v4_2_ring_test_ring, .test_ib = uvd_v4_2_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 38864f562981..578ffb62fdb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -160,7 +160,7 @@ static int uvd_v5_0_hw_init(void *handle) goto done; } - r = amdgpu_ring_lock(ring, 10); + r = amdgpu_ring_alloc(ring, 10); if (r) { DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); goto done; @@ -185,7 +185,7 @@ static int uvd_v5_0_hw_init(void *handle) amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); amdgpu_ring_write(ring, 3); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); done: /* lower clocks again */ @@ -279,6 +279,10 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) size = AMDGPU_UVD_HEAP_SIZE; WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); WREG32(mmUVD_VCPU_CACHE_SIZE2, size); + + WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); } /** @@ -483,33 +487,6 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** - * uvd_v5_0_ring_emit_semaphore - emit semaphore command - * - * @ring: amdgpu_ring pointer - * @semaphore: semaphore to emit commands for - * @emit_wait: true if we should emit a wait command - * - * Emit a semaphore command (either wait or signal) to the UVD ring. - */ -static bool uvd_v5_0_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0)); - amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0)); - amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0)); - amdgpu_ring_write(ring, 0x80 | (emit_wait ? 
1 : 0)); - - return true; -} - -/** * uvd_v5_0_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -524,7 +501,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) int r; WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); - r = amdgpu_ring_lock(ring, 3); + r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); @@ -532,7 +509,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) } amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmUVD_CONTEXT_ID); if (tmp == 0xDEADBEEF) @@ -595,7 +572,7 @@ static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); if (r) { DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); goto error; @@ -751,6 +728,12 @@ static void uvd_v5_0_print_status(void *handle) RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", RREG32(mmUVD_CONTEXT_ID)); + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); } static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev, @@ -821,10 +804,10 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v5_0_ring_emit_ib, .emit_fence = uvd_v5_0_ring_emit_fence, - .emit_semaphore = uvd_v5_0_ring_emit_semaphore, .test_ring = uvd_v5_0_ring_test_ring, .test_ib = uvd_v5_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 3d5913926436..d4da1f04378c 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -157,7 +157,7 @@ static int uvd_v6_0_hw_init(void *handle) goto done; } - r = amdgpu_ring_lock(ring, 10); + r = amdgpu_ring_alloc(ring, 10); if (r) { DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); goto done; @@ -182,7 +182,7 @@ static int uvd_v6_0_hw_init(void *handle) amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); amdgpu_ring_write(ring, 3); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); done: if (!r) @@ -277,6 +277,10 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) size = AMDGPU_UVD_HEAP_SIZE; WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); WREG32(mmUVD_VCPU_CACHE_SIZE2, size); + + WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); } static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, @@ -722,33 +726,6 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** - * uvd_v6_0_ring_emit_semaphore - emit semaphore command - * - * @ring: amdgpu_ring pointer - * @semaphore: semaphore to emit commands for - * @emit_wait: true if we should emit a wait command - * - * Emit a semaphore command (either wait or signal) to the UVD ring. 
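
Every amdgpu_ring_funcs table touched by this series gets the same two-line treatment: the .emit_semaphore hook disappears along with the emit functions removed above, and a .pad_ib hook is added next to .insert_nop (the SDMA engines point it at their former vm_pad_ib helpers, the other rings at amdgpu_ring_generic_pad_ib). Using the UVD 6.0 table as the pattern, showing only the fields visible in the hunk below:

	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	/* .emit_semaphore is gone: semaphore packets are no longer emitted */
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = uvd_v6_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,	/* new hook for IB padding */
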
- */ -static bool uvd_v6_0_ring_emit_semaphore(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0)); - amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0)); - amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); - - amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0)); - amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0)); - - return true; -} - -/** * uvd_v6_0_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -763,7 +740,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) int r; WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); - r = amdgpu_ring_lock(ring, 3); + r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); @@ -771,7 +748,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) } amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_unlock_commit(ring); + amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmUVD_CONTEXT_ID); if (tmp == 0xDEADBEEF) @@ -827,7 +804,7 @@ static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); if (r) { DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); goto error; @@ -974,6 +951,12 @@ static void uvd_v6_0_print_status(void *handle) RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", RREG32(mmUVD_CONTEXT_ID)); + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); } static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev, @@ -1062,10 +1045,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v6_0_ring_emit_ib, .emit_fence = uvd_v6_0_ring_emit_fence, - .emit_semaphore = uvd_v6_0_ring_emit_semaphore, .test_ring = uvd_v6_0_ring_test_ring, .test_ib = uvd_v6_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 52ac7a8f1e58..9c804f436974 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -639,10 +639,10 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { .parse_cs = amdgpu_vce_ring_parse_cs, .emit_ib = amdgpu_vce_ring_emit_ib, .emit_fence = amdgpu_vce_ring_emit_fence, - .emit_semaphore = amdgpu_vce_ring_emit_semaphore, .test_ring = amdgpu_vce_ring_test_ring, .test_ib = amdgpu_vce_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index e99af81e4aec..8f8d479061f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -759,10 +759,10 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = { .parse_cs = 
amdgpu_vce_ring_parse_cs, .emit_ib = amdgpu_vce_ring_emit_ib, .emit_fence = amdgpu_vce_ring_emit_fence, - .emit_semaphore = amdgpu_vce_ring_emit_semaphore, .test_ring = amdgpu_vce_ring_test_ring, .test_ib = amdgpu_vce_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, }; static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 89f5a1ff6f43..125003517544 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -74,6 +74,9 @@ #include "uvd_v6_0.h" #include "vce_v3_0.h" #include "amdgpu_powerplay.h" +#if defined(CONFIG_DRM_AMD_ACP) +#include "amdgpu_acp.h" +#endif /* * Indirect registers accessor @@ -571,374 +574,12 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, return -EINVAL; } -static void vi_print_gpu_status_regs(struct amdgpu_device *adev) -{ - dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", - RREG32(mmGRBM_STATUS)); - dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", - RREG32(mmGRBM_STATUS2)); - dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", - RREG32(mmGRBM_STATUS_SE0)); - dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", - RREG32(mmGRBM_STATUS_SE1)); - dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", - RREG32(mmGRBM_STATUS_SE2)); - dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", - RREG32(mmGRBM_STATUS_SE3)); - dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", - RREG32(mmSRBM_STATUS)); - dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", - RREG32(mmSRBM_STATUS2)); - dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n", - RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET)); - if (adev->sdma.num_instances > 1) { - dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n", - RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET)); - } - dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); - dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT1)); - dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT2)); - dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", - RREG32(mmCP_STALLED_STAT3)); - dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", - RREG32(mmCP_CPF_BUSY_STAT)); - dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_CPF_STALLED_STAT1)); - dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); - dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); - dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", - RREG32(mmCP_CPC_STALLED_STAT1)); - dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); -} - -/** - * vi_gpu_check_soft_reset - check which blocks are busy - * - * @adev: amdgpu_device pointer - * - * Check which blocks are busy and return the relevant reset - * mask to be used by vi_gpu_soft_reset(). - * Returns a mask of the blocks to be reset. 
- */ -u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev) -{ - u32 reset_mask = 0; - u32 tmp; - - /* GRBM_STATUS */ - tmp = RREG32(mmGRBM_STATUS); - if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | - GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | - GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | - GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | - GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | - GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) - reset_mask |= AMDGPU_RESET_GFX; - - if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) - reset_mask |= AMDGPU_RESET_CP; - - /* GRBM_STATUS2 */ - tmp = RREG32(mmGRBM_STATUS2); - if (tmp & GRBM_STATUS2__RLC_BUSY_MASK) - reset_mask |= AMDGPU_RESET_RLC; - - if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK | - GRBM_STATUS2__CPC_BUSY_MASK | - GRBM_STATUS2__CPG_BUSY_MASK)) - reset_mask |= AMDGPU_RESET_CP; - - /* SRBM_STATUS2 */ - tmp = RREG32(mmSRBM_STATUS2); - if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) - reset_mask |= AMDGPU_RESET_DMA; - - if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) - reset_mask |= AMDGPU_RESET_DMA1; - - /* SRBM_STATUS */ - tmp = RREG32(mmSRBM_STATUS); - - if (tmp & SRBM_STATUS__IH_BUSY_MASK) - reset_mask |= AMDGPU_RESET_IH; - - if (tmp & SRBM_STATUS__SEM_BUSY_MASK) - reset_mask |= AMDGPU_RESET_SEM; - - if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK) - reset_mask |= AMDGPU_RESET_GRBM; - - if (adev->asic_type != CHIP_TOPAZ) { - if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK | - SRBM_STATUS__UVD_BUSY_MASK)) - reset_mask |= AMDGPU_RESET_UVD; - } - - if (tmp & SRBM_STATUS__VMC_BUSY_MASK) - reset_mask |= AMDGPU_RESET_VMC; - - if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | - SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) - reset_mask |= AMDGPU_RESET_MC; - - /* SDMA0_STATUS_REG */ - tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET); - if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) - reset_mask |= AMDGPU_RESET_DMA; - - /* SDMA1_STATUS_REG */ - if (adev->sdma.num_instances > 1) { - tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET); - if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) - reset_mask |= AMDGPU_RESET_DMA1; - } -#if 0 - /* VCE_STATUS */ - if (adev->asic_type != CHIP_TOPAZ) { - tmp = RREG32(mmVCE_STATUS); - if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK) - reset_mask |= AMDGPU_RESET_VCE; - if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK) - reset_mask |= AMDGPU_RESET_VCE1; - - } - - if (adev->asic_type != CHIP_TOPAZ) { - if (amdgpu_display_is_display_hung(adev)) - reset_mask |= AMDGPU_RESET_DISPLAY; - } -#endif - - /* Skip MC reset as it's mostly likely not hung, just busy */ - if (reset_mask & AMDGPU_RESET_MC) { - DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); - reset_mask &= ~AMDGPU_RESET_MC; - } - - return reset_mask; -} - -/** - * vi_gpu_soft_reset - soft reset GPU - * - * @adev: amdgpu_device pointer - * @reset_mask: mask of which blocks to reset - * - * Soft reset the blocks specified in @reset_mask. 
- */ -static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask) -{ - struct amdgpu_mode_mc_save save; - u32 grbm_soft_reset = 0, srbm_soft_reset = 0; - u32 tmp; - - if (reset_mask == 0) - return; - - dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask); - - vi_print_gpu_status_regs(adev); - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); - dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", - RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); - - /* disable CG/PG */ - - /* stop the rlc */ - //XXX - //gfx_v8_0_rlc_stop(adev); - - /* Disable GFX parsing/prefetching */ - tmp = RREG32(mmCP_ME_CNTL); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); - WREG32(mmCP_ME_CNTL, tmp); - - /* Disable MEC parsing/prefetching */ - tmp = RREG32(mmCP_MEC_CNTL); - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); - WREG32(mmCP_MEC_CNTL, tmp); - - if (reset_mask & AMDGPU_RESET_DMA) { - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); - } - if (reset_mask & AMDGPU_RESET_DMA1) { - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); - } - - gmc_v8_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { - dev_warn(adev->dev, "Wait for MC idle timedout !\n"); - } - - if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) { - grbm_soft_reset = - REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); - grbm_soft_reset = - REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); - } - - if (reset_mask & AMDGPU_RESET_CP) { - grbm_soft_reset = - REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); - } - - if (reset_mask & AMDGPU_RESET_DMA) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1); - - if (reset_mask & AMDGPU_RESET_DMA1) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1); - - if (reset_mask & AMDGPU_RESET_DISPLAY) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1); - - if (reset_mask & AMDGPU_RESET_RLC) - grbm_soft_reset = - REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); - - if (reset_mask & AMDGPU_RESET_SEM) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1); - - if (reset_mask & AMDGPU_RESET_IH) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1); - - if (reset_mask & AMDGPU_RESET_GRBM) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); - - if (reset_mask & AMDGPU_RESET_VMC) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1); - - if (reset_mask & AMDGPU_RESET_UVD) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); - - if (reset_mask & AMDGPU_RESET_VCE) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); - - if (reset_mask & AMDGPU_RESET_VCE) - srbm_soft_reset = - 
REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); - - if (!(adev->flags & AMD_IS_APU)) { - if (reset_mask & AMDGPU_RESET_MC) - srbm_soft_reset = - REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); - } - - if (grbm_soft_reset) { - tmp = RREG32(mmGRBM_SOFT_RESET); - tmp |= grbm_soft_reset; - dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~grbm_soft_reset; - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); - } - - if (srbm_soft_reset) { - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - } - - /* Wait a little for things to settle down */ - udelay(50); - - gmc_v8_0_mc_resume(adev, &save); - udelay(50); - - vi_print_gpu_status_regs(adev); -} - static void vi_gpu_pci_config_reset(struct amdgpu_device *adev) { - struct amdgpu_mode_mc_save save; - u32 tmp, i; + u32 i; dev_info(adev->dev, "GPU pci config reset\n"); - /* disable dpm? */ - - /* disable cg/pg */ - - /* Disable GFX parsing/prefetching */ - tmp = RREG32(mmCP_ME_CNTL); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); - WREG32(mmCP_ME_CNTL, tmp); - - /* Disable MEC parsing/prefetching */ - tmp = RREG32(mmCP_MEC_CNTL); - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); - WREG32(mmCP_MEC_CNTL, tmp); - - /* Disable GFX parsing/prefetching */ - WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | - CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); - - /* Disable MEC parsing/prefetching */ - WREG32(mmCP_MEC_CNTL, - CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); - - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); - - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); - - /* XXX other engines? 
*/ - - /* halt the rlc, disable cp internal ints */ - //XXX - //gfx_v8_0_rlc_stop(adev); - - udelay(50); - - /* disable mem access */ - gmc_v8_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { - dev_warn(adev->dev, "Wait for MC idle timed out !\n"); - } - /* disable BM */ pci_clear_master(adev->pdev); /* reset */ @@ -978,26 +619,11 @@ static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hun */ static int vi_asic_reset(struct amdgpu_device *adev) { - u32 reset_mask; - - reset_mask = vi_gpu_check_soft_reset(adev); - - if (reset_mask) - vi_set_bios_scratch_engine_hung(adev, true); - - /* try soft reset */ - vi_gpu_soft_reset(adev, reset_mask); - - reset_mask = vi_gpu_check_soft_reset(adev); + vi_set_bios_scratch_engine_hung(adev, true); - /* try pci config reset */ - if (reset_mask && amdgpu_hard_reset) - vi_gpu_pci_config_reset(adev); + vi_gpu_pci_config_reset(adev); - reset_mask = vi_gpu_check_soft_reset(adev); - - if (!reset_mask) - vi_set_bios_scratch_engine_hung(adev, false); + vi_set_bios_scratch_engine_hung(adev, false); return 0; } @@ -1347,6 +973,15 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] = .rev = 0, .funcs = &vce_v3_0_ip_funcs, }, +#if defined(CONFIG_DRM_AMD_ACP) + { + .type = AMD_IP_BLOCK_TYPE_ACP, + .major = 2, + .minor = 2, + .rev = 0, + .funcs = &acp_ip_funcs, + }, +#endif }; int vi_set_ip_blocks(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index ca8410e8683d..850a5623661f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -59,18 +59,23 @@ module_param(send_sigterm, int, 0444); MODULE_PARM_DESC(send_sigterm, "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)"); -bool kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f) +static int amdkfd_init_completed; + +int kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f) { + if (!amdkfd_init_completed) + return -EPROBE_DEFER; + /* * Only one interface version is supported, * no kfd/kgd version skew allowed. 
*/ if (interface_version != KFD_INTERFACE_VERSION) - return false; + return -EINVAL; *g2f = &kgd2kfd; - return true; + return 0; } EXPORT_SYMBOL(kgd2kfd_init); @@ -111,6 +116,8 @@ static int __init kfd_module_init(void) kfd_process_create_wq(); + amdkfd_init_completed = 1; + dev_info(kfd_device, "Initialized module\n"); return 0; @@ -125,6 +132,8 @@ err_pasid: static void __exit kfd_module_exit(void) { + amdkfd_init_completed = 0; + kfd_process_destroy_wq(); kfd_topology_shutdown(); kfd_chardev_exit(); diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 1195d06f55bc..15ff8b2c26e7 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -73,6 +73,7 @@ enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_SDMA, AMD_IP_BLOCK_TYPE_UVD, AMD_IP_BLOCK_TYPE_VCE, + AMD_IP_BLOCK_TYPE_ACP, }; enum amd_clockgating_state { diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h index dc52ea0df4b4..d3ccf5a86de0 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h @@ -1379,6 +1379,7 @@ #define mmDC_GPIO_PAD_STRENGTH_1 0x1978 #define mmDC_GPIO_PAD_STRENGTH_2 0x1979 #define mmPHY_AUX_CNTL 0x197f +#define mmDC_GPIO_I2CPAD_MASK 0x1974 #define mmDC_GPIO_I2CPAD_A 0x1975 #define mmDC_GPIO_I2CPAD_EN 0x1976 #define mmDC_GPIO_I2CPAD_Y 0x1977 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h new file mode 100644 index 000000000000..6bea30ef3df5 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h @@ -0,0 +1,1117 @@ +/* + * DCE_8_0 Register documentation + * + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
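With kgd2kfd_init() now returning an int rather than a bool, a kgd-side GPU driver probe path can tell "amdkfd has not finished loading yet" (-EPROBE_DEFER) apart from a genuine kfd/kgd interface mismatch (-EINVAL). A minimal caller-side sketch, assuming only the interface shown in this hunk (the local variable names are illustrative, not from the patch):

	/* Illustrative caller; not taken from the patch. */
	const struct kgd2kfd_calls *kgd2kfd = NULL;
	int r;

	r = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (r == -EPROBE_DEFER)
		return r;		/* amdkfd not initialized yet: defer this probe and retry */
	if (r)
		kgd2kfd = NULL;		/* version skew (-EINVAL): carry on without KFD support */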
+ */ + +#ifndef DCE_8_0_ENUM_H +#define DCE_8_0_ENUM_H + +typedef enum SurfaceEndian { + ENDIAN_NONE = 0x0, + ENDIAN_8IN16 = 0x1, + ENDIAN_8IN32 = 0x2, + ENDIAN_8IN64 = 0x3, +} SurfaceEndian; +typedef enum ArrayMode { + ARRAY_LINEAR_GENERAL = 0x0, + ARRAY_LINEAR_ALIGNED = 0x1, + ARRAY_1D_TILED_THIN1 = 0x2, + ARRAY_1D_TILED_THICK = 0x3, + ARRAY_2D_TILED_THIN1 = 0x4, + ARRAY_PRT_TILED_THIN1 = 0x5, + ARRAY_PRT_2D_TILED_THIN1 = 0x6, + ARRAY_2D_TILED_THICK = 0x7, + ARRAY_2D_TILED_XTHICK = 0x8, + ARRAY_PRT_TILED_THICK = 0x9, + ARRAY_PRT_2D_TILED_THICK = 0xa, + ARRAY_PRT_3D_TILED_THIN1 = 0xb, + ARRAY_3D_TILED_THIN1 = 0xc, + ARRAY_3D_TILED_THICK = 0xd, + ARRAY_3D_TILED_XTHICK = 0xe, + ARRAY_PRT_3D_TILED_THICK = 0xf, +} ArrayMode; +typedef enum PipeTiling { + CONFIG_1_PIPE = 0x0, + CONFIG_2_PIPE = 0x1, + CONFIG_4_PIPE = 0x2, + CONFIG_8_PIPE = 0x3, +} PipeTiling; +typedef enum BankTiling { + CONFIG_4_BANK = 0x0, + CONFIG_8_BANK = 0x1, +} BankTiling; +typedef enum GroupInterleave { + CONFIG_256B_GROUP = 0x0, + CONFIG_512B_GROUP = 0x1, +} GroupInterleave; +typedef enum RowTiling { + CONFIG_1KB_ROW = 0x0, + CONFIG_2KB_ROW = 0x1, + CONFIG_4KB_ROW = 0x2, + CONFIG_8KB_ROW = 0x3, + CONFIG_1KB_ROW_OPT = 0x4, + CONFIG_2KB_ROW_OPT = 0x5, + CONFIG_4KB_ROW_OPT = 0x6, + CONFIG_8KB_ROW_OPT = 0x7, +} RowTiling; +typedef enum BankSwapBytes { + CONFIG_128B_SWAPS = 0x0, + CONFIG_256B_SWAPS = 0x1, + CONFIG_512B_SWAPS = 0x2, + CONFIG_1KB_SWAPS = 0x3, +} BankSwapBytes; +typedef enum SampleSplitBytes { + CONFIG_1KB_SPLIT = 0x0, + CONFIG_2KB_SPLIT = 0x1, + CONFIG_4KB_SPLIT = 0x2, + CONFIG_8KB_SPLIT = 0x3, +} SampleSplitBytes; +typedef enum NumPipes { + ADDR_CONFIG_1_PIPE = 0x0, + ADDR_CONFIG_2_PIPE = 0x1, + ADDR_CONFIG_4_PIPE = 0x2, + ADDR_CONFIG_8_PIPE = 0x3, +} NumPipes; +typedef enum PipeInterleaveSize { + ADDR_CONFIG_PIPE_INTERLEAVE_256B = 0x0, + ADDR_CONFIG_PIPE_INTERLEAVE_512B = 0x1, +} PipeInterleaveSize; +typedef enum BankInterleaveSize { + ADDR_CONFIG_BANK_INTERLEAVE_1 = 0x0, + ADDR_CONFIG_BANK_INTERLEAVE_2 = 0x1, + ADDR_CONFIG_BANK_INTERLEAVE_4 = 0x2, + ADDR_CONFIG_BANK_INTERLEAVE_8 = 0x3, +} BankInterleaveSize; +typedef enum NumShaderEngines { + ADDR_CONFIG_1_SHADER_ENGINE = 0x0, + ADDR_CONFIG_2_SHADER_ENGINE = 0x1, +} NumShaderEngines; +typedef enum ShaderEngineTileSize { + ADDR_CONFIG_SE_TILE_16 = 0x0, + ADDR_CONFIG_SE_TILE_32 = 0x1, +} ShaderEngineTileSize; +typedef enum NumGPUs { + ADDR_CONFIG_1_GPU = 0x0, + ADDR_CONFIG_2_GPU = 0x1, + ADDR_CONFIG_4_GPU = 0x2, +} NumGPUs; +typedef enum MultiGPUTileSize { + ADDR_CONFIG_GPU_TILE_16 = 0x0, + ADDR_CONFIG_GPU_TILE_32 = 0x1, + ADDR_CONFIG_GPU_TILE_64 = 0x2, + ADDR_CONFIG_GPU_TILE_128 = 0x3, +} MultiGPUTileSize; +typedef enum RowSize { + ADDR_CONFIG_1KB_ROW = 0x0, + ADDR_CONFIG_2KB_ROW = 0x1, + ADDR_CONFIG_4KB_ROW = 0x2, +} RowSize; +typedef enum NumLowerPipes { + ADDR_CONFIG_1_LOWER_PIPES = 0x0, + ADDR_CONFIG_2_LOWER_PIPES = 0x1, +} NumLowerPipes; +typedef enum DebugBlockId { + DBG_CLIENT_BLKID_RESERVED = 0x0, + DBG_CLIENT_BLKID_dbg = 0x1, + DBG_CLIENT_BLKID_uvdu_0 = 0x2, + DBG_CLIENT_BLKID_uvdu_1 = 0x3, + DBG_CLIENT_BLKID_uvdu_2 = 0x4, + DBG_CLIENT_BLKID_uvdu_3 = 0x5, + DBG_CLIENT_BLKID_uvdu_4 = 0x6, + DBG_CLIENT_BLKID_uvdu_5 = 0x7, + DBG_CLIENT_BLKID_uvdu_6 = 0x8, + DBG_CLIENT_BLKID_uvdm_0 = 0x9, + DBG_CLIENT_BLKID_uvdm_1 = 0xa, + DBG_CLIENT_BLKID_uvdm_2 = 0xb, + DBG_CLIENT_BLKID_uvdm_3 = 0xc, + DBG_CLIENT_BLKID_vcea_0 = 0xd, + DBG_CLIENT_BLKID_vcea_1 = 0xe, + DBG_CLIENT_BLKID_vcea_2 = 0xf, + DBG_CLIENT_BLKID_vcea_3 = 0x10, + DBG_CLIENT_BLKID_vcea_4 = 0x11, + 
DBG_CLIENT_BLKID_vcea_5 = 0x12, + DBG_CLIENT_BLKID_vcea_6 = 0x13, + DBG_CLIENT_BLKID_vceb_0 = 0x14, + DBG_CLIENT_BLKID_vceb_1 = 0x15, + DBG_CLIENT_BLKID_vceb_2 = 0x16, + DBG_CLIENT_BLKID_dco = 0x17, + DBG_CLIENT_BLKID_xdma = 0x18, + DBG_CLIENT_BLKID_smu_0 = 0x19, + DBG_CLIENT_BLKID_smu_1 = 0x1a, + DBG_CLIENT_BLKID_smu_2 = 0x1b, + DBG_CLIENT_BLKID_gck = 0x1c, + DBG_CLIENT_BLKID_tmonw0 = 0x1d, + DBG_CLIENT_BLKID_tmonw1 = 0x1e, + DBG_CLIENT_BLKID_grbm = 0x1f, + DBG_CLIENT_BLKID_rlc = 0x20, + DBG_CLIENT_BLKID_ds0 = 0x21, + DBG_CLIENT_BLKID_cpg_0 = 0x22, + DBG_CLIENT_BLKID_cpg_1 = 0x23, + DBG_CLIENT_BLKID_cpc_0 = 0x24, + DBG_CLIENT_BLKID_cpc_1 = 0x25, + DBG_CLIENT_BLKID_cpf = 0x26, + DBG_CLIENT_BLKID_scf0 = 0x27, + DBG_CLIENT_BLKID_scf1 = 0x28, + DBG_CLIENT_BLKID_scf2 = 0x29, + DBG_CLIENT_BLKID_scf3 = 0x2a, + DBG_CLIENT_BLKID_pc0 = 0x2b, + DBG_CLIENT_BLKID_pc1 = 0x2c, + DBG_CLIENT_BLKID_pc2 = 0x2d, + DBG_CLIENT_BLKID_pc3 = 0x2e, + DBG_CLIENT_BLKID_vgt0 = 0x2f, + DBG_CLIENT_BLKID_vgt1 = 0x30, + DBG_CLIENT_BLKID_vgt2 = 0x31, + DBG_CLIENT_BLKID_vgt3 = 0x32, + DBG_CLIENT_BLKID_sx00 = 0x33, + DBG_CLIENT_BLKID_sx10 = 0x34, + DBG_CLIENT_BLKID_sx20 = 0x35, + DBG_CLIENT_BLKID_sx30 = 0x36, + DBG_CLIENT_BLKID_cb001 = 0x37, + DBG_CLIENT_BLKID_cb200 = 0x38, + DBG_CLIENT_BLKID_cb201 = 0x39, + DBG_CLIENT_BLKID_cbr0 = 0x3a, + DBG_CLIENT_BLKID_cb000 = 0x3b, + DBG_CLIENT_BLKID_cb101 = 0x3c, + DBG_CLIENT_BLKID_cb300 = 0x3d, + DBG_CLIENT_BLKID_cb301 = 0x3e, + DBG_CLIENT_BLKID_cbr1 = 0x3f, + DBG_CLIENT_BLKID_cb100 = 0x40, + DBG_CLIENT_BLKID_ia0 = 0x41, + DBG_CLIENT_BLKID_ia1 = 0x42, + DBG_CLIENT_BLKID_bci0 = 0x43, + DBG_CLIENT_BLKID_bci1 = 0x44, + DBG_CLIENT_BLKID_bci2 = 0x45, + DBG_CLIENT_BLKID_bci3 = 0x46, + DBG_CLIENT_BLKID_pa0 = 0x47, + DBG_CLIENT_BLKID_pa1 = 0x48, + DBG_CLIENT_BLKID_spim0 = 0x49, + DBG_CLIENT_BLKID_spim1 = 0x4a, + DBG_CLIENT_BLKID_spim2 = 0x4b, + DBG_CLIENT_BLKID_spim3 = 0x4c, + DBG_CLIENT_BLKID_sdma = 0x4d, + DBG_CLIENT_BLKID_ih = 0x4e, + DBG_CLIENT_BLKID_sem = 0x4f, + DBG_CLIENT_BLKID_srbm = 0x50, + DBG_CLIENT_BLKID_hdp = 0x51, + DBG_CLIENT_BLKID_acp_0 = 0x52, + DBG_CLIENT_BLKID_acp_1 = 0x53, + DBG_CLIENT_BLKID_sam = 0x54, + DBG_CLIENT_BLKID_mcc0 = 0x55, + DBG_CLIENT_BLKID_mcc1 = 0x56, + DBG_CLIENT_BLKID_mcc2 = 0x57, + DBG_CLIENT_BLKID_mcc3 = 0x58, + DBG_CLIENT_BLKID_mcd0 = 0x59, + DBG_CLIENT_BLKID_mcd1 = 0x5a, + DBG_CLIENT_BLKID_mcd2 = 0x5b, + DBG_CLIENT_BLKID_mcd3 = 0x5c, + DBG_CLIENT_BLKID_mcb = 0x5d, + DBG_CLIENT_BLKID_vmc = 0x5e, + DBG_CLIENT_BLKID_gmcon = 0x5f, + DBG_CLIENT_BLKID_gdc_0 = 0x60, + DBG_CLIENT_BLKID_gdc_1 = 0x61, + DBG_CLIENT_BLKID_gdc_2 = 0x62, + DBG_CLIENT_BLKID_gdc_3 = 0x63, + DBG_CLIENT_BLKID_gdc_4 = 0x64, + DBG_CLIENT_BLKID_gdc_5 = 0x65, + DBG_CLIENT_BLKID_gdc_6 = 0x66, + DBG_CLIENT_BLKID_gdc_7 = 0x67, + DBG_CLIENT_BLKID_gdc_8 = 0x68, + DBG_CLIENT_BLKID_gdc_9 = 0x69, + DBG_CLIENT_BLKID_gdc_10 = 0x6a, + DBG_CLIENT_BLKID_gdc_11 = 0x6b, + DBG_CLIENT_BLKID_gdc_12 = 0x6c, + DBG_CLIENT_BLKID_gdc_13 = 0x6d, + DBG_CLIENT_BLKID_gdc_14 = 0x6e, + DBG_CLIENT_BLKID_gdc_15 = 0x6f, + DBG_CLIENT_BLKID_gdc_16 = 0x70, + DBG_CLIENT_BLKID_gdc_17 = 0x71, + DBG_CLIENT_BLKID_gdc_18 = 0x72, + DBG_CLIENT_BLKID_gdc_19 = 0x73, + DBG_CLIENT_BLKID_gdc_20 = 0x74, + DBG_CLIENT_BLKID_gdc_21 = 0x75, + DBG_CLIENT_BLKID_gdc_22 = 0x76, + DBG_CLIENT_BLKID_wd = 0x77, + DBG_CLIENT_BLKID_sdma_0 = 0x78, + DBG_CLIENT_BLKID_sdma_1 = 0x79, +} DebugBlockId; +typedef enum DebugBlockId_OLD { + DBG_BLOCK_ID_RESERVED = 0x0, + DBG_BLOCK_ID_DBG = 0x1, + DBG_BLOCK_ID_VMC = 0x2, + DBG_BLOCK_ID_PDMA = 0x3, + 
DBG_BLOCK_ID_CG = 0x4, + DBG_BLOCK_ID_SRBM = 0x5, + DBG_BLOCK_ID_GRBM = 0x6, + DBG_BLOCK_ID_RLC = 0x7, + DBG_BLOCK_ID_CSC = 0x8, + DBG_BLOCK_ID_SEM = 0x9, + DBG_BLOCK_ID_IH = 0xa, + DBG_BLOCK_ID_SC = 0xb, + DBG_BLOCK_ID_SQ = 0xc, + DBG_BLOCK_ID_AVP = 0xd, + DBG_BLOCK_ID_GMCON = 0xe, + DBG_BLOCK_ID_SMU = 0xf, + DBG_BLOCK_ID_DMA0 = 0x10, + DBG_BLOCK_ID_DMA1 = 0x11, + DBG_BLOCK_ID_SPIM = 0x12, + DBG_BLOCK_ID_GDS = 0x13, + DBG_BLOCK_ID_SPIS = 0x14, + DBG_BLOCK_ID_UNUSED0 = 0x15, + DBG_BLOCK_ID_PA0 = 0x16, + DBG_BLOCK_ID_PA1 = 0x17, + DBG_BLOCK_ID_CP0 = 0x18, + DBG_BLOCK_ID_CP1 = 0x19, + DBG_BLOCK_ID_CP2 = 0x1a, + DBG_BLOCK_ID_UNUSED1 = 0x1b, + DBG_BLOCK_ID_UVDU = 0x1c, + DBG_BLOCK_ID_UVDM = 0x1d, + DBG_BLOCK_ID_VCE = 0x1e, + DBG_BLOCK_ID_UNUSED2 = 0x1f, + DBG_BLOCK_ID_VGT0 = 0x20, + DBG_BLOCK_ID_VGT1 = 0x21, + DBG_BLOCK_ID_IA = 0x22, + DBG_BLOCK_ID_UNUSED3 = 0x23, + DBG_BLOCK_ID_SCT0 = 0x24, + DBG_BLOCK_ID_SCT1 = 0x25, + DBG_BLOCK_ID_SPM0 = 0x26, + DBG_BLOCK_ID_SPM1 = 0x27, + DBG_BLOCK_ID_TCAA = 0x28, + DBG_BLOCK_ID_TCAB = 0x29, + DBG_BLOCK_ID_TCCA = 0x2a, + DBG_BLOCK_ID_TCCB = 0x2b, + DBG_BLOCK_ID_MCC0 = 0x2c, + DBG_BLOCK_ID_MCC1 = 0x2d, + DBG_BLOCK_ID_MCC2 = 0x2e, + DBG_BLOCK_ID_MCC3 = 0x2f, + DBG_BLOCK_ID_SX0 = 0x30, + DBG_BLOCK_ID_SX1 = 0x31, + DBG_BLOCK_ID_SX2 = 0x32, + DBG_BLOCK_ID_SX3 = 0x33, + DBG_BLOCK_ID_UNUSED4 = 0x34, + DBG_BLOCK_ID_UNUSED5 = 0x35, + DBG_BLOCK_ID_UNUSED6 = 0x36, + DBG_BLOCK_ID_UNUSED7 = 0x37, + DBG_BLOCK_ID_PC0 = 0x38, + DBG_BLOCK_ID_PC1 = 0x39, + DBG_BLOCK_ID_UNUSED8 = 0x3a, + DBG_BLOCK_ID_UNUSED9 = 0x3b, + DBG_BLOCK_ID_UNUSED10 = 0x3c, + DBG_BLOCK_ID_UNUSED11 = 0x3d, + DBG_BLOCK_ID_MCB = 0x3e, + DBG_BLOCK_ID_UNUSED12 = 0x3f, + DBG_BLOCK_ID_SCB0 = 0x40, + DBG_BLOCK_ID_SCB1 = 0x41, + DBG_BLOCK_ID_UNUSED13 = 0x42, + DBG_BLOCK_ID_UNUSED14 = 0x43, + DBG_BLOCK_ID_SCF0 = 0x44, + DBG_BLOCK_ID_SCF1 = 0x45, + DBG_BLOCK_ID_UNUSED15 = 0x46, + DBG_BLOCK_ID_UNUSED16 = 0x47, + DBG_BLOCK_ID_BCI0 = 0x48, + DBG_BLOCK_ID_BCI1 = 0x49, + DBG_BLOCK_ID_BCI2 = 0x4a, + DBG_BLOCK_ID_BCI3 = 0x4b, + DBG_BLOCK_ID_UNUSED17 = 0x4c, + DBG_BLOCK_ID_UNUSED18 = 0x4d, + DBG_BLOCK_ID_UNUSED19 = 0x4e, + DBG_BLOCK_ID_UNUSED20 = 0x4f, + DBG_BLOCK_ID_CB00 = 0x50, + DBG_BLOCK_ID_CB01 = 0x51, + DBG_BLOCK_ID_CB02 = 0x52, + DBG_BLOCK_ID_CB03 = 0x53, + DBG_BLOCK_ID_CB04 = 0x54, + DBG_BLOCK_ID_UNUSED21 = 0x55, + DBG_BLOCK_ID_UNUSED22 = 0x56, + DBG_BLOCK_ID_UNUSED23 = 0x57, + DBG_BLOCK_ID_CB10 = 0x58, + DBG_BLOCK_ID_CB11 = 0x59, + DBG_BLOCK_ID_CB12 = 0x5a, + DBG_BLOCK_ID_CB13 = 0x5b, + DBG_BLOCK_ID_CB14 = 0x5c, + DBG_BLOCK_ID_UNUSED24 = 0x5d, + DBG_BLOCK_ID_UNUSED25 = 0x5e, + DBG_BLOCK_ID_UNUSED26 = 0x5f, + DBG_BLOCK_ID_TCP0 = 0x60, + DBG_BLOCK_ID_TCP1 = 0x61, + DBG_BLOCK_ID_TCP2 = 0x62, + DBG_BLOCK_ID_TCP3 = 0x63, + DBG_BLOCK_ID_TCP4 = 0x64, + DBG_BLOCK_ID_TCP5 = 0x65, + DBG_BLOCK_ID_TCP6 = 0x66, + DBG_BLOCK_ID_TCP7 = 0x67, + DBG_BLOCK_ID_TCP8 = 0x68, + DBG_BLOCK_ID_TCP9 = 0x69, + DBG_BLOCK_ID_TCP10 = 0x6a, + DBG_BLOCK_ID_TCP11 = 0x6b, + DBG_BLOCK_ID_TCP12 = 0x6c, + DBG_BLOCK_ID_TCP13 = 0x6d, + DBG_BLOCK_ID_TCP14 = 0x6e, + DBG_BLOCK_ID_TCP15 = 0x6f, + DBG_BLOCK_ID_TCP16 = 0x70, + DBG_BLOCK_ID_TCP17 = 0x71, + DBG_BLOCK_ID_TCP18 = 0x72, + DBG_BLOCK_ID_TCP19 = 0x73, + DBG_BLOCK_ID_TCP20 = 0x74, + DBG_BLOCK_ID_TCP21 = 0x75, + DBG_BLOCK_ID_TCP22 = 0x76, + DBG_BLOCK_ID_TCP23 = 0x77, + DBG_BLOCK_ID_TCP_RESERVED0 = 0x78, + DBG_BLOCK_ID_TCP_RESERVED1 = 0x79, + DBG_BLOCK_ID_TCP_RESERVED2 = 0x7a, + DBG_BLOCK_ID_TCP_RESERVED3 = 0x7b, + DBG_BLOCK_ID_TCP_RESERVED4 = 0x7c, + DBG_BLOCK_ID_TCP_RESERVED5 = 0x7d, + 
DBG_BLOCK_ID_TCP_RESERVED6 = 0x7e, + DBG_BLOCK_ID_TCP_RESERVED7 = 0x7f, + DBG_BLOCK_ID_DB00 = 0x80, + DBG_BLOCK_ID_DB01 = 0x81, + DBG_BLOCK_ID_DB02 = 0x82, + DBG_BLOCK_ID_DB03 = 0x83, + DBG_BLOCK_ID_DB04 = 0x84, + DBG_BLOCK_ID_UNUSED27 = 0x85, + DBG_BLOCK_ID_UNUSED28 = 0x86, + DBG_BLOCK_ID_UNUSED29 = 0x87, + DBG_BLOCK_ID_DB10 = 0x88, + DBG_BLOCK_ID_DB11 = 0x89, + DBG_BLOCK_ID_DB12 = 0x8a, + DBG_BLOCK_ID_DB13 = 0x8b, + DBG_BLOCK_ID_DB14 = 0x8c, + DBG_BLOCK_ID_UNUSED30 = 0x8d, + DBG_BLOCK_ID_UNUSED31 = 0x8e, + DBG_BLOCK_ID_UNUSED32 = 0x8f, + DBG_BLOCK_ID_TCC0 = 0x90, + DBG_BLOCK_ID_TCC1 = 0x91, + DBG_BLOCK_ID_TCC2 = 0x92, + DBG_BLOCK_ID_TCC3 = 0x93, + DBG_BLOCK_ID_TCC4 = 0x94, + DBG_BLOCK_ID_TCC5 = 0x95, + DBG_BLOCK_ID_TCC6 = 0x96, + DBG_BLOCK_ID_TCC7 = 0x97, + DBG_BLOCK_ID_SPS00 = 0x98, + DBG_BLOCK_ID_SPS01 = 0x99, + DBG_BLOCK_ID_SPS02 = 0x9a, + DBG_BLOCK_ID_SPS10 = 0x9b, + DBG_BLOCK_ID_SPS11 = 0x9c, + DBG_BLOCK_ID_SPS12 = 0x9d, + DBG_BLOCK_ID_UNUSED33 = 0x9e, + DBG_BLOCK_ID_UNUSED34 = 0x9f, + DBG_BLOCK_ID_TA00 = 0xa0, + DBG_BLOCK_ID_TA01 = 0xa1, + DBG_BLOCK_ID_TA02 = 0xa2, + DBG_BLOCK_ID_TA03 = 0xa3, + DBG_BLOCK_ID_TA04 = 0xa4, + DBG_BLOCK_ID_TA05 = 0xa5, + DBG_BLOCK_ID_TA06 = 0xa6, + DBG_BLOCK_ID_TA07 = 0xa7, + DBG_BLOCK_ID_TA08 = 0xa8, + DBG_BLOCK_ID_TA09 = 0xa9, + DBG_BLOCK_ID_TA0A = 0xaa, + DBG_BLOCK_ID_TA0B = 0xab, + DBG_BLOCK_ID_UNUSED35 = 0xac, + DBG_BLOCK_ID_UNUSED36 = 0xad, + DBG_BLOCK_ID_UNUSED37 = 0xae, + DBG_BLOCK_ID_UNUSED38 = 0xaf, + DBG_BLOCK_ID_TA10 = 0xb0, + DBG_BLOCK_ID_TA11 = 0xb1, + DBG_BLOCK_ID_TA12 = 0xb2, + DBG_BLOCK_ID_TA13 = 0xb3, + DBG_BLOCK_ID_TA14 = 0xb4, + DBG_BLOCK_ID_TA15 = 0xb5, + DBG_BLOCK_ID_TA16 = 0xb6, + DBG_BLOCK_ID_TA17 = 0xb7, + DBG_BLOCK_ID_TA18 = 0xb8, + DBG_BLOCK_ID_TA19 = 0xb9, + DBG_BLOCK_ID_TA1A = 0xba, + DBG_BLOCK_ID_TA1B = 0xbb, + DBG_BLOCK_ID_UNUSED39 = 0xbc, + DBG_BLOCK_ID_UNUSED40 = 0xbd, + DBG_BLOCK_ID_UNUSED41 = 0xbe, + DBG_BLOCK_ID_UNUSED42 = 0xbf, + DBG_BLOCK_ID_TD00 = 0xc0, + DBG_BLOCK_ID_TD01 = 0xc1, + DBG_BLOCK_ID_TD02 = 0xc2, + DBG_BLOCK_ID_TD03 = 0xc3, + DBG_BLOCK_ID_TD04 = 0xc4, + DBG_BLOCK_ID_TD05 = 0xc5, + DBG_BLOCK_ID_TD06 = 0xc6, + DBG_BLOCK_ID_TD07 = 0xc7, + DBG_BLOCK_ID_TD08 = 0xc8, + DBG_BLOCK_ID_TD09 = 0xc9, + DBG_BLOCK_ID_TD0A = 0xca, + DBG_BLOCK_ID_TD0B = 0xcb, + DBG_BLOCK_ID_UNUSED43 = 0xcc, + DBG_BLOCK_ID_UNUSED44 = 0xcd, + DBG_BLOCK_ID_UNUSED45 = 0xce, + DBG_BLOCK_ID_UNUSED46 = 0xcf, + DBG_BLOCK_ID_TD10 = 0xd0, + DBG_BLOCK_ID_TD11 = 0xd1, + DBG_BLOCK_ID_TD12 = 0xd2, + DBG_BLOCK_ID_TD13 = 0xd3, + DBG_BLOCK_ID_TD14 = 0xd4, + DBG_BLOCK_ID_TD15 = 0xd5, + DBG_BLOCK_ID_TD16 = 0xd6, + DBG_BLOCK_ID_TD17 = 0xd7, + DBG_BLOCK_ID_TD18 = 0xd8, + DBG_BLOCK_ID_TD19 = 0xd9, + DBG_BLOCK_ID_TD1A = 0xda, + DBG_BLOCK_ID_TD1B = 0xdb, + DBG_BLOCK_ID_UNUSED47 = 0xdc, + DBG_BLOCK_ID_UNUSED48 = 0xdd, + DBG_BLOCK_ID_UNUSED49 = 0xde, + DBG_BLOCK_ID_UNUSED50 = 0xdf, + DBG_BLOCK_ID_MCD0 = 0xe0, + DBG_BLOCK_ID_MCD1 = 0xe1, + DBG_BLOCK_ID_MCD2 = 0xe2, + DBG_BLOCK_ID_MCD3 = 0xe3, + DBG_BLOCK_ID_MCD4 = 0xe4, + DBG_BLOCK_ID_MCD5 = 0xe5, + DBG_BLOCK_ID_UNUSED51 = 0xe6, + DBG_BLOCK_ID_UNUSED52 = 0xe7, +} DebugBlockId_OLD; +typedef enum DebugBlockId_BY2 { + DBG_BLOCK_ID_RESERVED_BY2 = 0x0, + DBG_BLOCK_ID_VMC_BY2 = 0x1, + DBG_BLOCK_ID_CG_BY2 = 0x2, + DBG_BLOCK_ID_GRBM_BY2 = 0x3, + DBG_BLOCK_ID_CSC_BY2 = 0x4, + DBG_BLOCK_ID_IH_BY2 = 0x5, + DBG_BLOCK_ID_SQ_BY2 = 0x6, + DBG_BLOCK_ID_GMCON_BY2 = 0x7, + DBG_BLOCK_ID_DMA0_BY2 = 0x8, + DBG_BLOCK_ID_SPIM_BY2 = 0x9, + DBG_BLOCK_ID_SPIS_BY2 = 0xa, + DBG_BLOCK_ID_PA0_BY2 = 0xb, + DBG_BLOCK_ID_CP0_BY2 = 0xc, + 
DBG_BLOCK_ID_CP2_BY2 = 0xd, + DBG_BLOCK_ID_UVDU_BY2 = 0xe, + DBG_BLOCK_ID_VCE_BY2 = 0xf, + DBG_BLOCK_ID_VGT0_BY2 = 0x10, + DBG_BLOCK_ID_IA_BY2 = 0x11, + DBG_BLOCK_ID_SCT0_BY2 = 0x12, + DBG_BLOCK_ID_SPM0_BY2 = 0x13, + DBG_BLOCK_ID_TCAA_BY2 = 0x14, + DBG_BLOCK_ID_TCCA_BY2 = 0x15, + DBG_BLOCK_ID_MCC0_BY2 = 0x16, + DBG_BLOCK_ID_MCC2_BY2 = 0x17, + DBG_BLOCK_ID_SX0_BY2 = 0x18, + DBG_BLOCK_ID_SX2_BY2 = 0x19, + DBG_BLOCK_ID_UNUSED4_BY2 = 0x1a, + DBG_BLOCK_ID_UNUSED6_BY2 = 0x1b, + DBG_BLOCK_ID_PC0_BY2 = 0x1c, + DBG_BLOCK_ID_UNUSED8_BY2 = 0x1d, + DBG_BLOCK_ID_UNUSED10_BY2 = 0x1e, + DBG_BLOCK_ID_MCB_BY2 = 0x1f, + DBG_BLOCK_ID_SCB0_BY2 = 0x20, + DBG_BLOCK_ID_UNUSED13_BY2 = 0x21, + DBG_BLOCK_ID_SCF0_BY2 = 0x22, + DBG_BLOCK_ID_UNUSED15_BY2 = 0x23, + DBG_BLOCK_ID_BCI0_BY2 = 0x24, + DBG_BLOCK_ID_BCI2_BY2 = 0x25, + DBG_BLOCK_ID_UNUSED17_BY2 = 0x26, + DBG_BLOCK_ID_UNUSED19_BY2 = 0x27, + DBG_BLOCK_ID_CB00_BY2 = 0x28, + DBG_BLOCK_ID_CB02_BY2 = 0x29, + DBG_BLOCK_ID_CB04_BY2 = 0x2a, + DBG_BLOCK_ID_UNUSED22_BY2 = 0x2b, + DBG_BLOCK_ID_CB10_BY2 = 0x2c, + DBG_BLOCK_ID_CB12_BY2 = 0x2d, + DBG_BLOCK_ID_CB14_BY2 = 0x2e, + DBG_BLOCK_ID_UNUSED25_BY2 = 0x2f, + DBG_BLOCK_ID_TCP0_BY2 = 0x30, + DBG_BLOCK_ID_TCP2_BY2 = 0x31, + DBG_BLOCK_ID_TCP4_BY2 = 0x32, + DBG_BLOCK_ID_TCP6_BY2 = 0x33, + DBG_BLOCK_ID_TCP8_BY2 = 0x34, + DBG_BLOCK_ID_TCP10_BY2 = 0x35, + DBG_BLOCK_ID_TCP12_BY2 = 0x36, + DBG_BLOCK_ID_TCP14_BY2 = 0x37, + DBG_BLOCK_ID_TCP16_BY2 = 0x38, + DBG_BLOCK_ID_TCP18_BY2 = 0x39, + DBG_BLOCK_ID_TCP20_BY2 = 0x3a, + DBG_BLOCK_ID_TCP22_BY2 = 0x3b, + DBG_BLOCK_ID_TCP_RESERVED0_BY2 = 0x3c, + DBG_BLOCK_ID_TCP_RESERVED2_BY2 = 0x3d, + DBG_BLOCK_ID_TCP_RESERVED4_BY2 = 0x3e, + DBG_BLOCK_ID_TCP_RESERVED6_BY2 = 0x3f, + DBG_BLOCK_ID_DB00_BY2 = 0x40, + DBG_BLOCK_ID_DB02_BY2 = 0x41, + DBG_BLOCK_ID_DB04_BY2 = 0x42, + DBG_BLOCK_ID_UNUSED28_BY2 = 0x43, + DBG_BLOCK_ID_DB10_BY2 = 0x44, + DBG_BLOCK_ID_DB12_BY2 = 0x45, + DBG_BLOCK_ID_DB14_BY2 = 0x46, + DBG_BLOCK_ID_UNUSED31_BY2 = 0x47, + DBG_BLOCK_ID_TCC0_BY2 = 0x48, + DBG_BLOCK_ID_TCC2_BY2 = 0x49, + DBG_BLOCK_ID_TCC4_BY2 = 0x4a, + DBG_BLOCK_ID_TCC6_BY2 = 0x4b, + DBG_BLOCK_ID_SPS00_BY2 = 0x4c, + DBG_BLOCK_ID_SPS02_BY2 = 0x4d, + DBG_BLOCK_ID_SPS11_BY2 = 0x4e, + DBG_BLOCK_ID_UNUSED33_BY2 = 0x4f, + DBG_BLOCK_ID_TA00_BY2 = 0x50, + DBG_BLOCK_ID_TA02_BY2 = 0x51, + DBG_BLOCK_ID_TA04_BY2 = 0x52, + DBG_BLOCK_ID_TA06_BY2 = 0x53, + DBG_BLOCK_ID_TA08_BY2 = 0x54, + DBG_BLOCK_ID_TA0A_BY2 = 0x55, + DBG_BLOCK_ID_UNUSED35_BY2 = 0x56, + DBG_BLOCK_ID_UNUSED37_BY2 = 0x57, + DBG_BLOCK_ID_TA10_BY2 = 0x58, + DBG_BLOCK_ID_TA12_BY2 = 0x59, + DBG_BLOCK_ID_TA14_BY2 = 0x5a, + DBG_BLOCK_ID_TA16_BY2 = 0x5b, + DBG_BLOCK_ID_TA18_BY2 = 0x5c, + DBG_BLOCK_ID_TA1A_BY2 = 0x5d, + DBG_BLOCK_ID_UNUSED39_BY2 = 0x5e, + DBG_BLOCK_ID_UNUSED41_BY2 = 0x5f, + DBG_BLOCK_ID_TD00_BY2 = 0x60, + DBG_BLOCK_ID_TD02_BY2 = 0x61, + DBG_BLOCK_ID_TD04_BY2 = 0x62, + DBG_BLOCK_ID_TD06_BY2 = 0x63, + DBG_BLOCK_ID_TD08_BY2 = 0x64, + DBG_BLOCK_ID_TD0A_BY2 = 0x65, + DBG_BLOCK_ID_UNUSED43_BY2 = 0x66, + DBG_BLOCK_ID_UNUSED45_BY2 = 0x67, + DBG_BLOCK_ID_TD10_BY2 = 0x68, + DBG_BLOCK_ID_TD12_BY2 = 0x69, + DBG_BLOCK_ID_TD14_BY2 = 0x6a, + DBG_BLOCK_ID_TD16_BY2 = 0x6b, + DBG_BLOCK_ID_TD18_BY2 = 0x6c, + DBG_BLOCK_ID_TD1A_BY2 = 0x6d, + DBG_BLOCK_ID_UNUSED47_BY2 = 0x6e, + DBG_BLOCK_ID_UNUSED49_BY2 = 0x6f, + DBG_BLOCK_ID_MCD0_BY2 = 0x70, + DBG_BLOCK_ID_MCD2_BY2 = 0x71, + DBG_BLOCK_ID_MCD4_BY2 = 0x72, + DBG_BLOCK_ID_UNUSED51_BY2 = 0x73, +} DebugBlockId_BY2; +typedef enum DebugBlockId_BY4 { + DBG_BLOCK_ID_RESERVED_BY4 = 0x0, + DBG_BLOCK_ID_CG_BY4 = 0x1, + DBG_BLOCK_ID_CSC_BY4 
= 0x2, + DBG_BLOCK_ID_SQ_BY4 = 0x3, + DBG_BLOCK_ID_DMA0_BY4 = 0x4, + DBG_BLOCK_ID_SPIS_BY4 = 0x5, + DBG_BLOCK_ID_CP0_BY4 = 0x6, + DBG_BLOCK_ID_UVDU_BY4 = 0x7, + DBG_BLOCK_ID_VGT0_BY4 = 0x8, + DBG_BLOCK_ID_SCT0_BY4 = 0x9, + DBG_BLOCK_ID_TCAA_BY4 = 0xa, + DBG_BLOCK_ID_MCC0_BY4 = 0xb, + DBG_BLOCK_ID_SX0_BY4 = 0xc, + DBG_BLOCK_ID_UNUSED4_BY4 = 0xd, + DBG_BLOCK_ID_PC0_BY4 = 0xe, + DBG_BLOCK_ID_UNUSED10_BY4 = 0xf, + DBG_BLOCK_ID_SCB0_BY4 = 0x10, + DBG_BLOCK_ID_SCF0_BY4 = 0x11, + DBG_BLOCK_ID_BCI0_BY4 = 0x12, + DBG_BLOCK_ID_UNUSED17_BY4 = 0x13, + DBG_BLOCK_ID_CB00_BY4 = 0x14, + DBG_BLOCK_ID_CB04_BY4 = 0x15, + DBG_BLOCK_ID_CB10_BY4 = 0x16, + DBG_BLOCK_ID_CB14_BY4 = 0x17, + DBG_BLOCK_ID_TCP0_BY4 = 0x18, + DBG_BLOCK_ID_TCP4_BY4 = 0x19, + DBG_BLOCK_ID_TCP8_BY4 = 0x1a, + DBG_BLOCK_ID_TCP12_BY4 = 0x1b, + DBG_BLOCK_ID_TCP16_BY4 = 0x1c, + DBG_BLOCK_ID_TCP20_BY4 = 0x1d, + DBG_BLOCK_ID_TCP_RESERVED0_BY4 = 0x1e, + DBG_BLOCK_ID_TCP_RESERVED4_BY4 = 0x1f, + DBG_BLOCK_ID_DB_BY4 = 0x20, + DBG_BLOCK_ID_DB04_BY4 = 0x21, + DBG_BLOCK_ID_DB10_BY4 = 0x22, + DBG_BLOCK_ID_DB14_BY4 = 0x23, + DBG_BLOCK_ID_TCC0_BY4 = 0x24, + DBG_BLOCK_ID_TCC4_BY4 = 0x25, + DBG_BLOCK_ID_SPS00_BY4 = 0x26, + DBG_BLOCK_ID_SPS11_BY4 = 0x27, + DBG_BLOCK_ID_TA00_BY4 = 0x28, + DBG_BLOCK_ID_TA04_BY4 = 0x29, + DBG_BLOCK_ID_TA08_BY4 = 0x2a, + DBG_BLOCK_ID_UNUSED35_BY4 = 0x2b, + DBG_BLOCK_ID_TA10_BY4 = 0x2c, + DBG_BLOCK_ID_TA14_BY4 = 0x2d, + DBG_BLOCK_ID_TA18_BY4 = 0x2e, + DBG_BLOCK_ID_UNUSED39_BY4 = 0x2f, + DBG_BLOCK_ID_TD00_BY4 = 0x30, + DBG_BLOCK_ID_TD04_BY4 = 0x31, + DBG_BLOCK_ID_TD08_BY4 = 0x32, + DBG_BLOCK_ID_UNUSED43_BY4 = 0x33, + DBG_BLOCK_ID_TD10_BY4 = 0x34, + DBG_BLOCK_ID_TD14_BY4 = 0x35, + DBG_BLOCK_ID_TD18_BY4 = 0x36, + DBG_BLOCK_ID_UNUSED47_BY4 = 0x37, + DBG_BLOCK_ID_MCD0_BY4 = 0x38, + DBG_BLOCK_ID_MCD4_BY4 = 0x39, +} DebugBlockId_BY4; +typedef enum DebugBlockId_BY8 { + DBG_BLOCK_ID_RESERVED_BY8 = 0x0, + DBG_BLOCK_ID_CSC_BY8 = 0x1, + DBG_BLOCK_ID_DMA0_BY8 = 0x2, + DBG_BLOCK_ID_CP0_BY8 = 0x3, + DBG_BLOCK_ID_VGT0_BY8 = 0x4, + DBG_BLOCK_ID_TCAA_BY8 = 0x5, + DBG_BLOCK_ID_SX0_BY8 = 0x6, + DBG_BLOCK_ID_PC0_BY8 = 0x7, + DBG_BLOCK_ID_SCB0_BY8 = 0x8, + DBG_BLOCK_ID_BCI0_BY8 = 0x9, + DBG_BLOCK_ID_CB00_BY8 = 0xa, + DBG_BLOCK_ID_CB10_BY8 = 0xb, + DBG_BLOCK_ID_TCP0_BY8 = 0xc, + DBG_BLOCK_ID_TCP8_BY8 = 0xd, + DBG_BLOCK_ID_TCP16_BY8 = 0xe, + DBG_BLOCK_ID_TCP_RESERVED0_BY8 = 0xf, + DBG_BLOCK_ID_DB00_BY8 = 0x10, + DBG_BLOCK_ID_DB10_BY8 = 0x11, + DBG_BLOCK_ID_TCC0_BY8 = 0x12, + DBG_BLOCK_ID_SPS00_BY8 = 0x13, + DBG_BLOCK_ID_TA00_BY8 = 0x14, + DBG_BLOCK_ID_TA08_BY8 = 0x15, + DBG_BLOCK_ID_TA10_BY8 = 0x16, + DBG_BLOCK_ID_TA18_BY8 = 0x17, + DBG_BLOCK_ID_TD00_BY8 = 0x18, + DBG_BLOCK_ID_TD08_BY8 = 0x19, + DBG_BLOCK_ID_TD10_BY8 = 0x1a, + DBG_BLOCK_ID_TD18_BY8 = 0x1b, + DBG_BLOCK_ID_MCD0_BY8 = 0x1c, +} DebugBlockId_BY8; +typedef enum DebugBlockId_BY16 { + DBG_BLOCK_ID_RESERVED_BY16 = 0x0, + DBG_BLOCK_ID_DMA0_BY16 = 0x1, + DBG_BLOCK_ID_VGT0_BY16 = 0x2, + DBG_BLOCK_ID_SX0_BY16 = 0x3, + DBG_BLOCK_ID_SCB0_BY16 = 0x4, + DBG_BLOCK_ID_CB00_BY16 = 0x5, + DBG_BLOCK_ID_TCP0_BY16 = 0x6, + DBG_BLOCK_ID_TCP16_BY16 = 0x7, + DBG_BLOCK_ID_DB00_BY16 = 0x8, + DBG_BLOCK_ID_TCC0_BY16 = 0x9, + DBG_BLOCK_ID_TA00_BY16 = 0xa, + DBG_BLOCK_ID_TA10_BY16 = 0xb, + DBG_BLOCK_ID_TD00_BY16 = 0xc, + DBG_BLOCK_ID_TD10_BY16 = 0xd, + DBG_BLOCK_ID_MCD0_BY16 = 0xe, +} DebugBlockId_BY16; +typedef enum CompareRef { + REF_NEVER = 0x0, + REF_LESS = 0x1, + REF_EQUAL = 0x2, + REF_LEQUAL = 0x3, + REF_GREATER = 0x4, + REF_NOTEQUAL = 0x5, + REF_GEQUAL = 0x6, + REF_ALWAYS = 0x7, +} CompareRef; +typedef enum 
ReadSize { + READ_256_BITS = 0x0, + READ_512_BITS = 0x1, +} ReadSize; +typedef enum DepthFormat { + DEPTH_INVALID = 0x0, + DEPTH_16 = 0x1, + DEPTH_X8_24 = 0x2, + DEPTH_8_24 = 0x3, + DEPTH_X8_24_FLOAT = 0x4, + DEPTH_8_24_FLOAT = 0x5, + DEPTH_32_FLOAT = 0x6, + DEPTH_X24_8_32_FLOAT = 0x7, +} DepthFormat; +typedef enum ZFormat { + Z_INVALID = 0x0, + Z_16 = 0x1, + Z_24 = 0x2, + Z_32_FLOAT = 0x3, +} ZFormat; +typedef enum StencilFormat { + STENCIL_INVALID = 0x0, + STENCIL_8 = 0x1, +} StencilFormat; +typedef enum CmaskMode { + CMASK_CLEAR_NONE = 0x0, + CMASK_CLEAR_ONE = 0x1, + CMASK_CLEAR_ALL = 0x2, + CMASK_ANY_EXPANDED = 0x3, + CMASK_ALPHA0_FRAG1 = 0x4, + CMASK_ALPHA0_FRAG2 = 0x5, + CMASK_ALPHA0_FRAG4 = 0x6, + CMASK_ALPHA0_FRAGS = 0x7, + CMASK_ALPHA1_FRAG1 = 0x8, + CMASK_ALPHA1_FRAG2 = 0x9, + CMASK_ALPHA1_FRAG4 = 0xa, + CMASK_ALPHA1_FRAGS = 0xb, + CMASK_ALPHAX_FRAG1 = 0xc, + CMASK_ALPHAX_FRAG2 = 0xd, + CMASK_ALPHAX_FRAG4 = 0xe, + CMASK_ALPHAX_FRAGS = 0xf, +} CmaskMode; +typedef enum QuadExportFormat { + EXPORT_UNUSED = 0x0, + EXPORT_32_R = 0x1, + EXPORT_32_GR = 0x2, + EXPORT_32_AR = 0x3, + EXPORT_FP16_ABGR = 0x4, + EXPORT_UNSIGNED16_ABGR = 0x5, + EXPORT_SIGNED16_ABGR = 0x6, + EXPORT_32_ABGR = 0x7, +} QuadExportFormat; +typedef enum QuadExportFormatOld { + EXPORT_4P_32BPC_ABGR = 0x0, + EXPORT_4P_16BPC_ABGR = 0x1, + EXPORT_4P_32BPC_GR = 0x2, + EXPORT_4P_32BPC_AR = 0x3, + EXPORT_2P_32BPC_ABGR = 0x4, + EXPORT_8P_32BPC_R = 0x5, +} QuadExportFormatOld; +typedef enum ColorFormat { + COLOR_INVALID = 0x0, + COLOR_8 = 0x1, + COLOR_16 = 0x2, + COLOR_8_8 = 0x3, + COLOR_32 = 0x4, + COLOR_16_16 = 0x5, + COLOR_10_11_11 = 0x6, + COLOR_11_11_10 = 0x7, + COLOR_10_10_10_2 = 0x8, + COLOR_2_10_10_10 = 0x9, + COLOR_8_8_8_8 = 0xa, + COLOR_32_32 = 0xb, + COLOR_16_16_16_16 = 0xc, + COLOR_RESERVED_13 = 0xd, + COLOR_32_32_32_32 = 0xe, + COLOR_RESERVED_15 = 0xf, + COLOR_5_6_5 = 0x10, + COLOR_1_5_5_5 = 0x11, + COLOR_5_5_5_1 = 0x12, + COLOR_4_4_4_4 = 0x13, + COLOR_8_24 = 0x14, + COLOR_24_8 = 0x15, + COLOR_X24_8_32_FLOAT = 0x16, + COLOR_RESERVED_23 = 0x17, +} ColorFormat; +typedef enum SurfaceFormat { + FMT_INVALID = 0x0, + FMT_8 = 0x1, + FMT_16 = 0x2, + FMT_8_8 = 0x3, + FMT_32 = 0x4, + FMT_16_16 = 0x5, + FMT_10_11_11 = 0x6, + FMT_11_11_10 = 0x7, + FMT_10_10_10_2 = 0x8, + FMT_2_10_10_10 = 0x9, + FMT_8_8_8_8 = 0xa, + FMT_32_32 = 0xb, + FMT_16_16_16_16 = 0xc, + FMT_32_32_32 = 0xd, + FMT_32_32_32_32 = 0xe, + FMT_RESERVED_4 = 0xf, + FMT_5_6_5 = 0x10, + FMT_1_5_5_5 = 0x11, + FMT_5_5_5_1 = 0x12, + FMT_4_4_4_4 = 0x13, + FMT_8_24 = 0x14, + FMT_24_8 = 0x15, + FMT_X24_8_32_FLOAT = 0x16, + FMT_RESERVED_33 = 0x17, + FMT_11_11_10_FLOAT = 0x18, + FMT_16_FLOAT = 0x19, + FMT_32_FLOAT = 0x1a, + FMT_16_16_FLOAT = 0x1b, + FMT_8_24_FLOAT = 0x1c, + FMT_24_8_FLOAT = 0x1d, + FMT_32_32_FLOAT = 0x1e, + FMT_10_11_11_FLOAT = 0x1f, + FMT_16_16_16_16_FLOAT = 0x20, + FMT_3_3_2 = 0x21, + FMT_6_5_5 = 0x22, + FMT_32_32_32_32_FLOAT = 0x23, + FMT_RESERVED_36 = 0x24, + FMT_1 = 0x25, + FMT_1_REVERSED = 0x26, + FMT_GB_GR = 0x27, + FMT_BG_RG = 0x28, + FMT_32_AS_8 = 0x29, + FMT_32_AS_8_8 = 0x2a, + FMT_5_9_9_9_SHAREDEXP = 0x2b, + FMT_8_8_8 = 0x2c, + FMT_16_16_16 = 0x2d, + FMT_16_16_16_FLOAT = 0x2e, + FMT_4_4 = 0x2f, + FMT_32_32_32_FLOAT = 0x30, + FMT_BC1 = 0x31, + FMT_BC2 = 0x32, + FMT_BC3 = 0x33, + FMT_BC4 = 0x34, + FMT_BC5 = 0x35, + FMT_BC6 = 0x36, + FMT_BC7 = 0x37, + FMT_32_AS_32_32_32_32 = 0x38, + FMT_APC3 = 0x39, + FMT_APC4 = 0x3a, + FMT_APC5 = 0x3b, + FMT_APC6 = 0x3c, + FMT_APC7 = 0x3d, + FMT_CTX1 = 0x3e, + FMT_RESERVED_63 = 0x3f, +} SurfaceFormat; +typedef 
enum BUF_DATA_FORMAT { + BUF_DATA_FORMAT_INVALID = 0x0, + BUF_DATA_FORMAT_8 = 0x1, + BUF_DATA_FORMAT_16 = 0x2, + BUF_DATA_FORMAT_8_8 = 0x3, + BUF_DATA_FORMAT_32 = 0x4, + BUF_DATA_FORMAT_16_16 = 0x5, + BUF_DATA_FORMAT_10_11_11 = 0x6, + BUF_DATA_FORMAT_11_11_10 = 0x7, + BUF_DATA_FORMAT_10_10_10_2 = 0x8, + BUF_DATA_FORMAT_2_10_10_10 = 0x9, + BUF_DATA_FORMAT_8_8_8_8 = 0xa, + BUF_DATA_FORMAT_32_32 = 0xb, + BUF_DATA_FORMAT_16_16_16_16 = 0xc, + BUF_DATA_FORMAT_32_32_32 = 0xd, + BUF_DATA_FORMAT_32_32_32_32 = 0xe, + BUF_DATA_FORMAT_RESERVED_15 = 0xf, +} BUF_DATA_FORMAT; +typedef enum IMG_DATA_FORMAT { + IMG_DATA_FORMAT_INVALID = 0x0, + IMG_DATA_FORMAT_8 = 0x1, + IMG_DATA_FORMAT_16 = 0x2, + IMG_DATA_FORMAT_8_8 = 0x3, + IMG_DATA_FORMAT_32 = 0x4, + IMG_DATA_FORMAT_16_16 = 0x5, + IMG_DATA_FORMAT_10_11_11 = 0x6, + IMG_DATA_FORMAT_11_11_10 = 0x7, + IMG_DATA_FORMAT_10_10_10_2 = 0x8, + IMG_DATA_FORMAT_2_10_10_10 = 0x9, + IMG_DATA_FORMAT_8_8_8_8 = 0xa, + IMG_DATA_FORMAT_32_32 = 0xb, + IMG_DATA_FORMAT_16_16_16_16 = 0xc, + IMG_DATA_FORMAT_32_32_32 = 0xd, + IMG_DATA_FORMAT_32_32_32_32 = 0xe, + IMG_DATA_FORMAT_RESERVED_15 = 0xf, + IMG_DATA_FORMAT_5_6_5 = 0x10, + IMG_DATA_FORMAT_1_5_5_5 = 0x11, + IMG_DATA_FORMAT_5_5_5_1 = 0x12, + IMG_DATA_FORMAT_4_4_4_4 = 0x13, + IMG_DATA_FORMAT_8_24 = 0x14, + IMG_DATA_FORMAT_24_8 = 0x15, + IMG_DATA_FORMAT_X24_8_32 = 0x16, + IMG_DATA_FORMAT_RESERVED_23 = 0x17, + IMG_DATA_FORMAT_RESERVED_24 = 0x18, + IMG_DATA_FORMAT_RESERVED_25 = 0x19, + IMG_DATA_FORMAT_RESERVED_26 = 0x1a, + IMG_DATA_FORMAT_RESERVED_27 = 0x1b, + IMG_DATA_FORMAT_RESERVED_28 = 0x1c, + IMG_DATA_FORMAT_RESERVED_29 = 0x1d, + IMG_DATA_FORMAT_RESERVED_30 = 0x1e, + IMG_DATA_FORMAT_RESERVED_31 = 0x1f, + IMG_DATA_FORMAT_GB_GR = 0x20, + IMG_DATA_FORMAT_BG_RG = 0x21, + IMG_DATA_FORMAT_5_9_9_9 = 0x22, + IMG_DATA_FORMAT_BC1 = 0x23, + IMG_DATA_FORMAT_BC2 = 0x24, + IMG_DATA_FORMAT_BC3 = 0x25, + IMG_DATA_FORMAT_BC4 = 0x26, + IMG_DATA_FORMAT_BC5 = 0x27, + IMG_DATA_FORMAT_BC6 = 0x28, + IMG_DATA_FORMAT_BC7 = 0x29, + IMG_DATA_FORMAT_RESERVED_42 = 0x2a, + IMG_DATA_FORMAT_RESERVED_43 = 0x2b, + IMG_DATA_FORMAT_FMASK8_S2_F1 = 0x2c, + IMG_DATA_FORMAT_FMASK8_S4_F1 = 0x2d, + IMG_DATA_FORMAT_FMASK8_S8_F1 = 0x2e, + IMG_DATA_FORMAT_FMASK8_S2_F2 = 0x2f, + IMG_DATA_FORMAT_FMASK8_S4_F2 = 0x30, + IMG_DATA_FORMAT_FMASK8_S4_F4 = 0x31, + IMG_DATA_FORMAT_FMASK16_S16_F1 = 0x32, + IMG_DATA_FORMAT_FMASK16_S8_F2 = 0x33, + IMG_DATA_FORMAT_FMASK32_S16_F2 = 0x34, + IMG_DATA_FORMAT_FMASK32_S8_F4 = 0x35, + IMG_DATA_FORMAT_FMASK32_S8_F8 = 0x36, + IMG_DATA_FORMAT_FMASK64_S16_F4 = 0x37, + IMG_DATA_FORMAT_FMASK64_S16_F8 = 0x38, + IMG_DATA_FORMAT_4_4 = 0x39, + IMG_DATA_FORMAT_6_5_5 = 0x3a, + IMG_DATA_FORMAT_1 = 0x3b, + IMG_DATA_FORMAT_1_REVERSED = 0x3c, + IMG_DATA_FORMAT_32_AS_8 = 0x3d, + IMG_DATA_FORMAT_32_AS_8_8 = 0x3e, + IMG_DATA_FORMAT_32_AS_32_32_32_32 = 0x3f, +} IMG_DATA_FORMAT; +typedef enum BUF_NUM_FORMAT { + BUF_NUM_FORMAT_UNORM = 0x0, + BUF_NUM_FORMAT_SNORM = 0x1, + BUF_NUM_FORMAT_USCALED = 0x2, + BUF_NUM_FORMAT_SSCALED = 0x3, + BUF_NUM_FORMAT_UINT = 0x4, + BUF_NUM_FORMAT_SINT = 0x5, + BUF_NUM_FORMAT_SNORM_OGL = 0x6, + BUF_NUM_FORMAT_FLOAT = 0x7, +} BUF_NUM_FORMAT; +typedef enum IMG_NUM_FORMAT { + IMG_NUM_FORMAT_UNORM = 0x0, + IMG_NUM_FORMAT_SNORM = 0x1, + IMG_NUM_FORMAT_USCALED = 0x2, + IMG_NUM_FORMAT_SSCALED = 0x3, + IMG_NUM_FORMAT_UINT = 0x4, + IMG_NUM_FORMAT_SINT = 0x5, + IMG_NUM_FORMAT_SNORM_OGL = 0x6, + IMG_NUM_FORMAT_FLOAT = 0x7, + IMG_NUM_FORMAT_RESERVED_8 = 0x8, + IMG_NUM_FORMAT_SRGB = 0x9, + IMG_NUM_FORMAT_UBNORM = 0xa, + 
IMG_NUM_FORMAT_UBNORM_OGL = 0xb, + IMG_NUM_FORMAT_UBINT = 0xc, + IMG_NUM_FORMAT_UBSCALED = 0xd, + IMG_NUM_FORMAT_RESERVED_14 = 0xe, + IMG_NUM_FORMAT_RESERVED_15 = 0xf, +} IMG_NUM_FORMAT; +typedef enum TileType { + ARRAY_COLOR_TILE = 0x0, + ARRAY_DEPTH_TILE = 0x1, +} TileType; +typedef enum NonDispTilingOrder { + ADDR_SURF_MICRO_TILING_DISPLAY = 0x0, + ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1, +} NonDispTilingOrder; +typedef enum MicroTileMode { + ADDR_SURF_DISPLAY_MICRO_TILING = 0x0, + ADDR_SURF_THIN_MICRO_TILING = 0x1, + ADDR_SURF_DEPTH_MICRO_TILING = 0x2, + ADDR_SURF_ROTATED_MICRO_TILING = 0x3, + ADDR_SURF_THICK_MICRO_TILING = 0x4, +} MicroTileMode; +typedef enum TileSplit { + ADDR_SURF_TILE_SPLIT_64B = 0x0, + ADDR_SURF_TILE_SPLIT_128B = 0x1, + ADDR_SURF_TILE_SPLIT_256B = 0x2, + ADDR_SURF_TILE_SPLIT_512B = 0x3, + ADDR_SURF_TILE_SPLIT_1KB = 0x4, + ADDR_SURF_TILE_SPLIT_2KB = 0x5, + ADDR_SURF_TILE_SPLIT_4KB = 0x6, +} TileSplit; +typedef enum SampleSplit { + ADDR_SURF_SAMPLE_SPLIT_1 = 0x0, + ADDR_SURF_SAMPLE_SPLIT_2 = 0x1, + ADDR_SURF_SAMPLE_SPLIT_4 = 0x2, + ADDR_SURF_SAMPLE_SPLIT_8 = 0x3, +} SampleSplit; +typedef enum PipeConfig { + ADDR_SURF_P2 = 0x0, + ADDR_SURF_P2_RESERVED0 = 0x1, + ADDR_SURF_P2_RESERVED1 = 0x2, + ADDR_SURF_P2_RESERVED2 = 0x3, + ADDR_SURF_P4_8x16 = 0x4, + ADDR_SURF_P4_16x16 = 0x5, + ADDR_SURF_P4_16x32 = 0x6, + ADDR_SURF_P4_32x32 = 0x7, + ADDR_SURF_P8_16x16_8x16 = 0x8, + ADDR_SURF_P8_16x32_8x16 = 0x9, + ADDR_SURF_P8_32x32_8x16 = 0xa, + ADDR_SURF_P8_16x32_16x16 = 0xb, + ADDR_SURF_P8_32x32_16x16 = 0xc, + ADDR_SURF_P8_32x32_16x32 = 0xd, + ADDR_SURF_P8_32x64_32x32 = 0xe, +} PipeConfig; +typedef enum NumBanks { + ADDR_SURF_2_BANK = 0x0, + ADDR_SURF_4_BANK = 0x1, + ADDR_SURF_8_BANK = 0x2, + ADDR_SURF_16_BANK = 0x3, +} NumBanks; +typedef enum BankWidth { + ADDR_SURF_BANK_WIDTH_1 = 0x0, + ADDR_SURF_BANK_WIDTH_2 = 0x1, + ADDR_SURF_BANK_WIDTH_4 = 0x2, + ADDR_SURF_BANK_WIDTH_8 = 0x3, +} BankWidth; +typedef enum BankHeight { + ADDR_SURF_BANK_HEIGHT_1 = 0x0, + ADDR_SURF_BANK_HEIGHT_2 = 0x1, + ADDR_SURF_BANK_HEIGHT_4 = 0x2, + ADDR_SURF_BANK_HEIGHT_8 = 0x3, +} BankHeight; +typedef enum BankWidthHeight { + ADDR_SURF_BANK_WH_1 = 0x0, + ADDR_SURF_BANK_WH_2 = 0x1, + ADDR_SURF_BANK_WH_4 = 0x2, + ADDR_SURF_BANK_WH_8 = 0x3, +} BankWidthHeight; +typedef enum MacroTileAspect { + ADDR_SURF_MACRO_ASPECT_1 = 0x0, + ADDR_SURF_MACRO_ASPECT_2 = 0x1, + ADDR_SURF_MACRO_ASPECT_4 = 0x2, + ADDR_SURF_MACRO_ASPECT_8 = 0x3, +} MacroTileAspect; +typedef enum TCC_CACHE_POLICIES { + TCC_CACHE_POLICY_LRU = 0x0, + TCC_CACHE_POLICY_STREAM = 0x1, + TCC_CACHE_POLICY_BYPASS = 0x2, +} TCC_CACHE_POLICIES; +typedef enum PERFMON_COUNTER_MODE { + PERFMON_COUNTER_MODE_ACCUM = 0x0, + PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1, + PERFMON_COUNTER_MODE_MAX = 0x2, + PERFMON_COUNTER_MODE_DIRTY = 0x3, + PERFMON_COUNTER_MODE_SAMPLE = 0x4, + PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT = 0x5, + PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT = 0x6, + PERFMON_COUNTER_MODE_CYCLES_GE_HI = 0x7, + PERFMON_COUNTER_MODE_CYCLES_EQ_HI = 0x8, + PERFMON_COUNTER_MODE_INACTIVE_CYCLES = 0x9, + PERFMON_COUNTER_MODE_RESERVED = 0xf, +} PERFMON_COUNTER_MODE; +typedef enum PERFMON_SPM_MODE { + PERFMON_SPM_MODE_OFF = 0x0, + PERFMON_SPM_MODE_16BIT_CLAMP = 0x1, + PERFMON_SPM_MODE_16BIT_NO_CLAMP = 0x2, + PERFMON_SPM_MODE_32BIT_CLAMP = 0x3, + PERFMON_SPM_MODE_32BIT_NO_CLAMP = 0x4, + PERFMON_SPM_MODE_RESERVED_5 = 0x5, + PERFMON_SPM_MODE_RESERVED_6 = 0x6, + PERFMON_SPM_MODE_RESERVED_7 = 0x7, + PERFMON_SPM_MODE_TEST_MODE_0 = 0x8, + PERFMON_SPM_MODE_TEST_MODE_1 = 
0x9, + PERFMON_SPM_MODE_TEST_MODE_2 = 0xa, +} PERFMON_SPM_MODE; +typedef enum SurfaceTiling { + ARRAY_LINEAR = 0x0, + ARRAY_TILED = 0x1, +} SurfaceTiling; +typedef enum SurfaceArray { + ARRAY_1D = 0x0, + ARRAY_2D = 0x1, + ARRAY_3D = 0x2, + ARRAY_3D_SLICE = 0x3, +} SurfaceArray; +typedef enum ColorArray { + ARRAY_2D_ALT_COLOR = 0x0, + ARRAY_2D_COLOR = 0x1, + ARRAY_3D_SLICE_COLOR = 0x3, +} ColorArray; +typedef enum DepthArray { + ARRAY_2D_ALT_DEPTH = 0x0, + ARRAY_2D_DEPTH = 0x1, +} DepthArray; + +#endif /* DCE_8_0_ENUM_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h index 8a2930734477..c331c9fe7b81 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h @@ -4130,6 +4130,18 @@ #define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0xe #define PHY_AUX_CNTL__AUX_PAD_RXSEL_MASK 0x10000 #define PHY_AUX_CNTL__AUX_PAD_RXSEL__SHIFT 0x10 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK_MASK 0x1 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK__SHIFT 0x0 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS_MASK 0x2 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS__SHIFT 0x1 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV_MASK 0x4 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV__SHIFT 0x2 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK_MASK 0x10 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK__SHIFT 0x4 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS_MASK 0x20 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS__SHIFT 0x5 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV_MASK 0x40 +#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV__SHIFT 0x6 #define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A_MASK 0x1 #define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A__SHIFT 0x0 #define DC_GPIO_I2CPAD_A__DC_GPIO_SDA_A_MASK 0x2 diff --git a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h new file mode 100644 index 000000000000..d21c6b14662f --- /dev/null +++ b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h @@ -0,0 +1,102 @@ +/* + * Volcanic Islands IV SRC Register documentation + * + * Copyright (C) 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
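The DC_GPIO_I2CPAD_MASK additions above follow the standard <register>__<field>_MASK / __SHIFT naming used throughout these headers, so the register would normally be programmed with a read-modify-write. A small illustrative sketch (the chosen field and value are arbitrary and not from the patch; it assumes the usual amdgpu RREG32/WREG32 accessors are in scope):

	/* Illustrative RMW of the newly exposed I2C pad mask register. */
	u32 tmp = RREG32(mmDC_GPIO_I2CPAD_MASK);

	tmp &= ~DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK_MASK;
	tmp |= 1 << DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK__SHIFT;	/* set the SCL mask bit */
	WREG32(mmDC_GPIO_I2CPAD_MASK, tmp);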
+ */ + +#ifndef _IVSRCID_VISLANDS30_H_ +#define _IVSRCID_VISLANDS30_H_ + + +// IV Source IDs + +#define VISLANDS30_IV_SRCID_D1_V_UPDATE_INT 7 // 0x07 +#define VISLANDS30_IV_EXTID_D1_V_UPDATE_INT 0 + +#define VISLANDS30_IV_SRCID_D1_GRPH_PFLIP 8 // 0x08 +#define VISLANDS30_IV_EXTID_D1_GRPH_PFLIP 0 + +#define VISLANDS30_IV_SRCID_D2_V_UPDATE_INT 9 // 0x09 +#define VISLANDS30_IV_EXTID_D2_V_UPDATE_INT 0 + +#define VISLANDS30_IV_SRCID_D2_GRPH_PFLIP 10 // 0x0a +#define VISLANDS30_IV_EXTID_D2_GRPH_PFLIP 0 + +#define VISLANDS30_IV_SRCID_D3_V_UPDATE_INT 11 // 0x0b +#define VISLANDS30_IV_EXTID_D3_V_UPDATE_INT 0 + +#define VISLANDS30_IV_SRCID_D3_GRPH_PFLIP 12 // 0x0c +#define VISLANDS30_IV_EXTID_D3_GRPH_PFLIP 0 + +#define VISLANDS30_IV_SRCID_D4_V_UPDATE_INT 13 // 0x0d +#define VISLANDS30_IV_EXTID_D4_V_UPDATE_INT 0 + +#define VISLANDS30_IV_SRCID_D4_GRPH_PFLIP 14 // 0x0e +#define VISLANDS30_IV_EXTID_D4_GRPH_PFLIP 0 + +#define VISLANDS30_IV_SRCID_D5_V_UPDATE_INT 15 // 0x0f +#define VISLANDS30_IV_EXTID_D5_V_UPDATE_INT 0 + +#define VISLANDS30_IV_SRCID_D5_GRPH_PFLIP 16 // 0x10 +#define VISLANDS30_IV_EXTID_D5_GRPH_PFLIP 0 + +#define VISLANDS30_IV_SRCID_D6_V_UPDATE_INT 17 // 0x11 +#define VISLANDS30_IV_EXTID_D6_V_UPDATE_INT 0 + +#define VISLANDS30_IV_SRCID_D6_GRPH_PFLIP 18 // 0x12 +#define VISLANDS30_IV_EXTID_D6_GRPH_PFLIP 0 + +#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A 42 // 0x2a +#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_A 0 + +#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_B 42 // 0x2a +#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_B 1 + +#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_C 42 // 0x2a +#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_C 2 + +#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_D 42 // 0x2a +#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_D 3 + +#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_E 42 // 0x2a +#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_E 4 + +#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_F 42 // 0x2a +#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_F 5 + +#define VISLANDS30_IV_SRCID_HPD_RX_A 42 // 0x2a +#define VISLANDS30_IV_EXTID_HPD_RX_A 6 + +#define VISLANDS30_IV_SRCID_HPD_RX_B 42 // 0x2a +#define VISLANDS30_IV_EXTID_HPD_RX_B 7 + +#define VISLANDS30_IV_SRCID_HPD_RX_C 42 // 0x2a +#define VISLANDS30_IV_EXTID_HPD_RX_C 8 + +#define VISLANDS30_IV_SRCID_HPD_RX_D 42 // 0x2a +#define VISLANDS30_IV_EXTID_HPD_RX_D 9 + +#define VISLANDS30_IV_SRCID_HPD_RX_E 42 // 0x2a +#define VISLANDS30_IV_EXTID_HPD_RX_E 10 + +#define VISLANDS30_IV_SRCID_HPD_RX_F 42 // 0x2a +#define VISLANDS30_IV_EXTID_HPD_RX_F 11 + +#endif // _IVSRCID_VISLANDS30_H_ diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 888250b33ea8..a09d9f352871 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -221,7 +221,7 @@ struct kgd2kfd_calls { int (*resume)(struct kfd_dev *kfd); }; -bool kgd2kfd_init(unsigned interface_version, +int kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f); #endif /* KGD_KFD_INTERFACE_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index aa67244a77ae..2ee4190f8c89 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -29,6 +29,7 @@ #include "pp_instance.h" #include "power_state.h" #include "eventmanager.h" +#include "pp_debug.h" #define PP_CHECK(handle) \ do { \ @@ -433,7 +434,10 @@ enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) 
case PP_StateUILabel_Performance: return POWER_STATE_TYPE_PERFORMANCE; default: - return POWER_STATE_TYPE_DEFAULT; + if (state->classification.flags & PP_StateClassificationFlag_Boot) + return POWER_STATE_TYPE_INTERNAL_BOOT; + else + return POWER_STATE_TYPE_DEFAULT; } } @@ -535,6 +539,112 @@ static int pp_dpm_get_temperature(void *handle) return hwmgr->hwmgr_func->get_temperature(hwmgr); } +static int pp_dpm_get_pp_num_states(void *handle, + struct pp_states_info *data) +{ + struct pp_hwmgr *hwmgr; + int i; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->ps == NULL) + return -EINVAL; + + data->nums = hwmgr->num_ps; + + for (i = 0; i < hwmgr->num_ps; i++) { + struct pp_power_state *state = (struct pp_power_state *) + ((unsigned long)hwmgr->ps + i * hwmgr->ps_size); + switch (state->classification.ui_label) { + case PP_StateUILabel_Battery: + data->states[i] = POWER_STATE_TYPE_BATTERY; + break; + case PP_StateUILabel_Balanced: + data->states[i] = POWER_STATE_TYPE_BALANCED; + break; + case PP_StateUILabel_Performance: + data->states[i] = POWER_STATE_TYPE_PERFORMANCE; + break; + default: + if (state->classification.flags & PP_StateClassificationFlag_Boot) + data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT; + else + data->states[i] = POWER_STATE_TYPE_DEFAULT; + } + } + + return 0; +} + +static int pp_dpm_get_pp_table(void *handle, char **table) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->get_pp_table == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_pp_table(hwmgr, table); +} + +static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->set_pp_table == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size); +} + +static int pp_dpm_force_clock_level(void *handle, + enum pp_clock_type type, int level) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->force_clock_level == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, level); +} + +static int pp_dpm_print_clock_levels(void *handle, + enum pp_clock_type type, char *buf) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (hwmgr == NULL || hwmgr->hwmgr_func == NULL || + hwmgr->hwmgr_func->print_clock_levels == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); +} + const struct amd_powerplay_funcs pp_dpm_funcs = { .get_temperature = pp_dpm_get_temperature, .load_firmware = pp_dpm_load_fw, @@ -552,6 +662,11 @@ const struct amd_powerplay_funcs pp_dpm_funcs = { .get_fan_control_mode = pp_dpm_get_fan_control_mode, .set_fan_speed_percent = pp_dpm_set_fan_speed_percent, .get_fan_speed_percent = pp_dpm_get_fan_speed_percent, + .get_pp_num_states = pp_dpm_get_pp_num_states, + .get_pp_table = pp_dpm_get_pp_table, + .set_pp_table = pp_dpm_set_pp_table, + .force_clock_level = pp_dpm_force_clock_level, + .print_clock_levels = pp_dpm_print_clock_levels, }; static int amd_pp_instance_init(struct amd_pp_init *pp_init, @@ 
-635,10 +750,10 @@ int amd_powerplay_fini(void *handle) /* export this function to DAL */ -int amd_powerplay_display_configuration_change(void *handle, const void *input) +int amd_powerplay_display_configuration_change(void *handle, + const struct amd_pp_display_configuration *display_config) { struct pp_hwmgr *hwmgr; - const struct amd_pp_display_configuration *display_config = input; PP_CHECK((struct pp_instance *)handle); @@ -650,7 +765,7 @@ int amd_powerplay_display_configuration_change(void *handle, const void *input) } int amd_powerplay_get_display_power_level(void *handle, - struct amd_pp_dal_clock_info *output) + struct amd_pp_simple_clock_info *output) { struct pp_hwmgr *hwmgr; @@ -663,3 +778,86 @@ int amd_powerplay_get_display_power_level(void *handle, return phm_get_dal_power_level(hwmgr, output); } + +int amd_powerplay_get_current_clocks(void *handle, + struct amd_pp_clock_info *clocks) +{ + struct pp_hwmgr *hwmgr; + struct amd_pp_simple_clock_info simple_clocks; + struct pp_clock_info hw_clocks; + + PP_CHECK((struct pp_instance *)handle); + + if (clocks == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + phm_get_dal_power_level(hwmgr, &simple_clocks); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { + if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment)) + PP_ASSERT_WITH_CODE(0, "Error in PHM_GetPowerContainmentClockInfo", return -1); + } else { + if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_Activity)) + PP_ASSERT_WITH_CODE(0, "Error in PHM_GetClockInfo", return -1); + } + + clocks->min_engine_clock = hw_clocks.min_eng_clk; + clocks->max_engine_clock = hw_clocks.max_eng_clk; + clocks->min_memory_clock = hw_clocks.min_mem_clk; + clocks->max_memory_clock = hw_clocks.max_mem_clk; + clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth; + clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth; + + clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; + clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; + + clocks->max_clocks_state = simple_clocks.level; + + if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) { + clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; + clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; + } + + return 0; + +} + +int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks) +{ + int result = -1; + + struct pp_hwmgr *hwmgr; + + PP_CHECK((struct pp_instance *)handle); + + if (clocks == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + result = phm_get_clock_by_type(hwmgr, type, clocks); + + return result; +} + +int amd_powerplay_get_display_mode_validation_clocks(void *handle, + struct amd_pp_simple_clock_info *clocks) +{ + int result = -1; + struct pp_hwmgr *hwmgr; + + PP_CHECK((struct pp_instance *)handle); + + if (clocks == NULL) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState)) + result = phm_get_max_high_clocks(hwmgr, clocks); + + return result; +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 0874ab42ee95..ef1daf1251c7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ 
b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -715,7 +715,6 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, unsigned long clock = 0; unsigned long level; unsigned long stable_pstate_sclk; - struct PP_Clocks clocks; unsigned long percentage; cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; @@ -726,8 +725,9 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, else cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; - /*PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks);*/ - clock = clocks.engineClock; + clock = hwmgr->display_config.min_core_set_clock; + if (clock == 0) + printk(KERN_ERR "[ powerplay ] min_core_set_clock not set\n"); if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) { cz_hwmgr->sclk_dpm.hard_min_clk = clock; @@ -883,9 +883,9 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr, if (pnew_state->action == FORCE_HIGH) cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch); - else if(pnew_state->action == CANCEL_FORCE_HIGH) - cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch); - else + else if (pnew_state->action == CANCEL_FORCE_HIGH) + cz_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch); + else cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch); } return 0; @@ -1110,9 +1110,10 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, cast_const_PhwCzPowerState(&pcurrent_ps->hardware); struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - struct PP_Clocks clocks; + struct PP_Clocks clocks = {0, 0, 0, 0}; bool force_high; - unsigned long num_of_active_displays = 4; + uint32_t num_of_active_displays = 0; + struct cgs_display_info info = {0}; cz_ps->evclk = hwmgr->vce_arbiter.evclk; cz_ps->ecclk = hwmgr->vce_arbiter.ecclk; @@ -1124,12 +1125,15 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); - /* to do PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks); */ - /* PECI_GetNumberOfActiveDisplays(pHwMgr->pPECI, &numOfActiveDisplays); */ + clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ? + hwmgr->display_config.min_mem_set_clock : + cz_hwmgr->sys_info.nbp_memory_clock[1]; + + cgs_get_active_displays_info(hwmgr->device, &info); + num_of_active_displays = info.display_count; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk; - else - clocks.memoryClock = 0; if (clocks.memoryClock < hwmgr->gfx_arbiter.mclk) clocks.memoryClock = hwmgr->gfx_arbiter.mclk; @@ -1199,6 +1203,7 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) printk(KERN_ERR "[ powerplay ] Fail to construct set_power_state\n"); return result; } + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS; result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings)); if (result != 0) { @@ -1630,10 +1635,10 @@ static void cz_hw_print_display_cfg( & PWRMGT_SEPARATION_TIME_MASK) << PWRMGT_SEPARATION_TIME_SHIFT; - data|= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0) + data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0) << PWRMGT_DISABLE_CPU_CSTATES_SHIFT; - data|= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0) + data |= (hw_data->cc6_settings.cpu_pstate_disable ? 
0x1 : 0x0) << PWRMGT_DISABLE_CPU_PSTATES_SHIFT; PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n", @@ -1648,9 +1653,9 @@ static void cz_hw_print_display_cfg( } - static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, +static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) - { +{ struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); if (separation_time != @@ -1678,20 +1683,19 @@ static void cz_hw_print_display_cfg( return 0; } - static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr, - struct amd_pp_dal_clock_info*info) +static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr, + struct amd_pp_simple_clock_info *info) { uint32_t i; - const struct phm_clock_voltage_dependency_table * table = + const struct phm_clock_voltage_dependency_table *table = hwmgr->dyn_state.vddc_dep_on_dal_pwrl; - const struct phm_clock_and_voltage_limits* limits = + const struct phm_clock_and_voltage_limits *limits = &hwmgr->dyn_state.max_clock_voltage_on_ac; info->engine_max_clock = limits->sclk; info->memory_max_clock = limits->mclk; for (i = table->count - 1; i > 0; i--) { - if (limits->vddc >= table->entries[i].v) { info->level = table->entries[i].clk; return 0; @@ -1700,6 +1704,158 @@ static void cz_hw_print_display_cfg( return -EINVAL; } +static int cz_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, int level) +{ + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMin, + (1 << level)); + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetSclkSoftMax, + (1 << level)); + break; + default: + break; + } + + return 0; +} + +static int cz_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + struct phm_clock_voltage_dependency_table *sclk_table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + int i, now, size = 0; + + switch (type) { + case PP_SCLK: + now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + ixTARGET_AND_CURRENT_PROFILE_INDEX), + TARGET_AND_CURRENT_PROFILE_INDEX, + CURR_SCLK_INDEX); + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->entries[i].clk / 100, + (i == now) ? "*" : ""); + break; + default: + break; + } + return size; +} + +static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + PHM_PerformanceLevelDesignation designation, uint32_t index, + PHM_PerformanceLevel *level) +{ + const struct cz_power_state *ps; + struct cz_hwmgr *data; + uint32_t level_index; + uint32_t i; + + if (level == NULL || hwmgr == NULL || state == NULL) + return -EINVAL; + + data = (struct cz_hwmgr *)(hwmgr->backend); + ps = cast_const_PhwCzPowerState(state); + + level_index = index > ps->level - 1 ? 
ps->level - 1 : index; + + level->coreClock = ps->levels[level_index].engineClock; + + if (designation == PHM_PerformanceLevelDesignation_PowerContainment) { + for (i = 1; i < ps->level; i++) { + if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) { + level->coreClock = ps->levels[i].engineClock; + break; + } + } + } + + if (level_index == 0) + level->memory_clock = data->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1]; + else + level->memory_clock = data->sys_info.nbp_memory_clock[0]; + + level->vddc = (cz_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4; + level->nonLocalMemoryFreq = 0; + level->nonLocalMemoryWidth = 0; + + return 0; +} + +static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) +{ + const struct cz_power_state *ps = cast_const_PhwCzPowerState(state); + + clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex)); + clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex)); + + return 0; +} + +static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks) +{ + struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend); + int i; + struct phm_clock_voltage_dependency_table *table; + + clocks->count = cz_get_max_sclk_level(hwmgr); + switch (type) { + case amd_pp_disp_clock: + for (i = 0; i < clocks->count; i++) + clocks->clock[i] = data->sys_info.display_clock[i]; + break; + case amd_pp_sys_clock: + table = hwmgr->dyn_state.vddc_dependency_on_sclk; + for (i = 0; i < clocks->count; i++) + clocks->clock[i] = table->entries[i].clk; + break; + case amd_pp_mem_clock: + clocks->count = CZ_NUM_NBPMEMORYCLOCK; + for (i = 0; i < clocks->count; i++) + clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i]; + break; + default: + return -1; + } + + return 0; +} + +static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) +{ + struct phm_clock_voltage_dependency_table *table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + unsigned long level; + const struct phm_clock_and_voltage_limits *limits = + &hwmgr->dyn_state.max_clock_voltage_on_ac; + + if ((NULL == table) || (table->count <= 0) || (clocks == NULL)) + return -EINVAL; + + level = cz_get_max_sclk_level(hwmgr) - 1; + + if (level < table->count) + clocks->engine_max_clock = table->entries[level].clk; + else + clocks->engine_max_clock = table->entries[table->count - 1].clk; + + clocks->memory_max_clock = limits->mclk; + + return 0; +} + static const struct pp_hwmgr_func cz_hwmgr_funcs = { .backend_init = cz_hwmgr_backend_init, .backend_fini = cz_hwmgr_backend_fini, @@ -1718,7 +1874,13 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = { .print_current_perforce_level = cz_print_current_perforce_level, .set_cpu_power_state = cz_set_cpu_power_state, .store_cc6_data = cz_store_cc6_data, - .get_dal_power_level= cz_get_dal_power_level, + .force_clock_level = cz_force_clock_level, + .print_clock_levels = cz_print_clock_levels, + .get_dal_power_level = cz_get_dal_power_level, + .get_performance_level = cz_get_performance_level, + .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks, + .get_clock_by_type = cz_get_clock_by_type, + .get_max_high_clocks = cz_get_max_high_clocks, }; int cz_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 28031a7eddba..5cca2ecc6bea 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c @@ -5073,6 +5073,125 @@ static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr) CG_FDO_CTRL2, FDO_PWM_MODE); } +static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + *table = (char *)&data->smc_state_table; + + return sizeof(struct SMU73_Discrete_DpmTable); +} + +static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + void *table = (void *)&data->smc_state_table; + + memcpy(table, buf, size); + + return 0; +} + +static int fiji_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, int level) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (!data->sclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + (1 << level)); + break; + case PP_MCLK: + if (!data->mclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + (1 << level)); + break; + case PP_PCIE: + if (!data->pcie_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + (1 << level)); + break; + default: + break; + } + + return 0; +} + +static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); + int i, now, size = 0; + uint32_t clock, pcie_speed; + + switch (type) { + case PP_SCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < sclk_table->count; i++) { + if (clock > sclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_MCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < mclk_table->count; i++) { + if (clock > mclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + pcie_speed = fiji_get_current_pcie_speed(hwmgr); + for (i = 0; i < pcie_table->count; i++) { + if (pcie_speed != pcie_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" : + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", + (i == now) ? 
"*" : ""); + break; + default: + break; + } + return size; +} + static const struct pp_hwmgr_func fiji_hwmgr_funcs = { .backend_init = &fiji_hwmgr_backend_init, .backend_fini = &tonga_hwmgr_backend_fini, @@ -5108,6 +5227,10 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = { .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt, .set_fan_control_mode = fiji_set_fan_control_mode, .get_fan_control_mode = fiji_get_fan_control_mode, + .get_pp_table = fiji_get_pp_table, + .set_pp_table = fiji_set_pp_table, + .force_clock_level = fiji_force_clock_level, + .print_clock_levels = fiji_print_clock_levels, }; int fiji_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 0f2d5e4bc241..be31bed2538a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -26,7 +26,7 @@ #include "power_state.h" #include "pp_acpi.h" #include "amd_acpi.h" -#include "amd_powerplay.h" +#include "pp_debug.h" #define PHM_FUNC_CHECK(hw) \ do { \ @@ -313,13 +313,12 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, } int phm_get_dal_power_level(struct pp_hwmgr *hwmgr, - struct amd_pp_dal_clock_info *info) + struct amd_pp_simple_clock_info *info) { PHM_FUNC_CHECK(hwmgr); if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL) return -EINVAL; - return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info); } @@ -332,3 +331,91 @@ int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr) return 0; } + + +int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + PHM_PerformanceLevelDesignation designation, uint32_t index, + PHM_PerformanceLevel *level) +{ + PHM_FUNC_CHECK(hwmgr); + if (hwmgr->hwmgr_func->get_performance_level == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_performance_level(hwmgr, state, designation, index, level); + + +} + + +/** +* Gets Clock Info. +* +* @param pHwMgr the address of the powerplay hardware manager. +* @param pPowerState the address of the Power State structure. +* @param pClockInfo the address of PP_ClockInfo structure where the result will be returned. +* @exception PP_Result_Failed if any of the paramters is NULL, otherwise the return value from the back-end. 
+*/ +int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *pclock_info, + PHM_PerformanceLevelDesignation designation) +{ + int result; + PHM_PerformanceLevel performance_level; + + PHM_FUNC_CHECK(hwmgr); + + PP_ASSERT_WITH_CODE((NULL != state), "Invalid Input!", return -EINVAL); + PP_ASSERT_WITH_CODE((NULL != pclock_info), "Invalid Input!", return -EINVAL); + + result = phm_get_performance_level(hwmgr, state, PHM_PerformanceLevelDesignation_Activity, 0, &performance_level); + + PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve minimum clocks.", return result); + + + pclock_info->min_mem_clk = performance_level.memory_clock; + pclock_info->min_eng_clk = performance_level.coreClock; + pclock_info->min_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth; + + + result = phm_get_performance_level(hwmgr, state, designation, + (hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1), &performance_level); + + PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve maximum clocks.", return result); + + pclock_info->max_mem_clk = performance_level.memory_clock; + pclock_info->max_eng_clk = performance_level.coreClock; + pclock_info->max_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth; + + return 0; +} + +int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->get_current_shallow_sleep_clocks == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_current_shallow_sleep_clocks(hwmgr, state, clock_info); + +} + +int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->get_clock_by_type == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_clock_by_type(hwmgr, type, clocks); + +} + +int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->get_max_high_clocks == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks); +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h index b7429a527828..b10df328d58c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h @@ -293,7 +293,7 @@ fInt GetScaledFraction(int X, int factor) } if (factor == 1) - return (ConvertToFraction(X)); + return ConvertToFraction(X); fValue = fDivide(ConvertToFraction(X * uPow(-1, bNEGATED)), ConvertToFraction(factor)); @@ -371,7 +371,7 @@ fInt fDivide (fInt X, fInt Y) fZERO = ConvertToFraction(0); if (Equal(Y, fZERO)) - return fZERO; + return fZERO; longlongX = (int64_t)X.full; longlongY = (int64_t)Y.full; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 44a925006479..bc83fa35ec46 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c @@ -6018,6 +6018,125 @@ static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr) CG_FDO_CTRL2, FDO_PWM_MODE); } +static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + *table = (char *)&data->smc_state_table; + + return sizeof(struct 
SMU72_Discrete_DpmTable); +} + +static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + void *table = (void *)&data->smc_state_table; + + memcpy(table, buf, size); + + return 0; +} + +static int tonga_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, int level) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (!data->sclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + (1 << level)); + break; + case PP_MCLK: + if (!data->mclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + (1 << level)); + break; + case PP_PCIE: + if (!data->pcie_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + (1 << level)); + break; + default: + break; + } + + return 0; +} + +static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct tonga_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); + int i, now, size = 0; + uint32_t clock, pcie_speed; + + switch (type) { + case PP_SCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < sclk_table->count; i++) { + if (clock > sclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_MCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < mclk_table->count; i++) { + if (clock > mclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + pcie_speed = tonga_get_current_pcie_speed(hwmgr); + for (i = 0; i < pcie_table->count; i++) { + if (pcie_speed != pcie_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" : + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", + (i == now) ? 
"*" : ""); + break; + default: + break; + } + return size; +} + static const struct pp_hwmgr_func tonga_hwmgr_funcs = { .backend_init = &tonga_hwmgr_backend_init, .backend_fini = &tonga_hwmgr_backend_fini, @@ -6055,6 +6174,10 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = { .check_states_equal = tonga_check_states_equal, .set_fan_control_mode = tonga_set_fan_control_mode, .get_fan_control_mode = tonga_get_fan_control_mode, + .get_pp_table = tonga_get_pp_table, + .set_pp_table = tonga_set_pp_table, + .force_clock_level = tonga_force_clock_level, + .print_clock_levels = tonga_print_clock_levels, }; int tonga_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index e61a3e67852e..7255f7ddf93a 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -29,6 +29,7 @@ #include "amd_shared.h" #include "cgs_common.h" + enum amd_pp_event { AMD_PP_EVENT_INITIALIZE = 0, AMD_PP_EVENT_UNINITIALIZE, @@ -123,6 +124,7 @@ enum amd_dpm_forced_level { AMD_DPM_FORCED_LEVEL_AUTO = 0, AMD_DPM_FORCED_LEVEL_LOW = 1, AMD_DPM_FORCED_LEVEL_HIGH = 2, + AMD_DPM_FORCED_LEVEL_MANUAL = 3, }; struct amd_pp_init { @@ -212,12 +214,55 @@ struct amd_pp_display_configuration { uint32_t dce_tolerable_mclk_in_active_latency; }; -struct amd_pp_dal_clock_info { +struct amd_pp_simple_clock_info { uint32_t engine_max_clock; uint32_t memory_max_clock; uint32_t level; }; +enum PP_DAL_POWERLEVEL { + PP_DAL_POWERLEVEL_INVALID = 0, + PP_DAL_POWERLEVEL_ULTRALOW, + PP_DAL_POWERLEVEL_LOW, + PP_DAL_POWERLEVEL_NOMINAL, + PP_DAL_POWERLEVEL_PERFORMANCE, + + PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW, + PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW, + PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL, + PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE, + PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1, + PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1, + PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1, + PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1, +}; + +struct amd_pp_clock_info { + uint32_t min_engine_clock; + uint32_t max_engine_clock; + uint32_t min_memory_clock; + uint32_t max_memory_clock; + uint32_t min_bus_bandwidth; + uint32_t max_bus_bandwidth; + uint32_t max_engine_clock_in_sr; + uint32_t min_engine_clock_in_sr; + enum PP_DAL_POWERLEVEL max_clocks_state; +}; + +enum amd_pp_clock_type { + amd_pp_disp_clock = 1, + amd_pp_sys_clock, + amd_pp_mem_clock +}; + +#define MAX_NUM_CLOCKS 16 + +struct amd_pp_clocks { + uint32_t count; + uint32_t clock[MAX_NUM_CLOCKS]; +}; + + enum { PP_GROUP_UNKNOWN = 0, PP_GROUP_GFX = 1, @@ -225,6 +270,17 @@ enum { PP_GROUP_MAX }; +enum pp_clock_type { + PP_SCLK, + PP_MCLK, + PP_PCIE, +}; + +struct pp_states_info { + uint32_t nums; + uint32_t states[16]; +}; + #define PP_GROUP_MASK 0xF0000000 #define PP_GROUP_SHIFT 28 @@ -278,6 +334,11 @@ struct amd_powerplay_funcs { int (*get_fan_control_mode)(void *handle); int (*set_fan_speed_percent)(void *handle, uint32_t percent); int (*get_fan_speed_percent)(void *handle, uint32_t *speed); + int (*get_pp_num_states)(void *handle, struct pp_states_info *data); + int (*get_pp_table)(void *handle, char **table); + int (*set_pp_table)(void *handle, const char *buf, size_t size); + int (*force_clock_level)(void *handle, enum pp_clock_type type, int level); + int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf); }; struct amd_powerplay { @@ -288,12 +349,23 @@ struct amd_powerplay { int 
amd_powerplay_init(struct amd_pp_init *pp_init, struct amd_powerplay *amd_pp); + int amd_powerplay_fini(void *handle); -int amd_powerplay_display_configuration_change(void *handle, const void *input); +int amd_powerplay_display_configuration_change(void *handle, + const struct amd_pp_display_configuration *input); int amd_powerplay_get_display_power_level(void *handle, - struct amd_pp_dal_clock_info *output); + struct amd_pp_simple_clock_info *output); + +int amd_powerplay_get_current_clocks(void *handle, + struct amd_pp_clock_info *output); + +int amd_powerplay_get_clock_by_type(void *handle, + enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks); +int amd_powerplay_get_display_mode_validation_clocks(void *handle, + struct amd_pp_simple_clock_info *output); #endif /* _AMD_POWERPLAY_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 91795efe1336..040d3f7cbf49 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -31,6 +31,7 @@ struct pp_power_state; enum amd_dpm_forced_level; struct PP_TemperatureRange; + struct phm_fan_speed_info { uint32_t min_percent; uint32_t max_percent; @@ -290,6 +291,15 @@ struct PP_Clocks { uint32_t engineClockInSR; }; +struct pp_clock_info { + uint32_t min_mem_clk; + uint32_t max_mem_clk; + uint32_t min_eng_clk; + uint32_t max_eng_clk; + uint32_t min_bus_bandwidth; + uint32_t max_bus_bandwidth; +}; + struct phm_platform_descriptor { uint32_t platformCaps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES]; uint32_t vbiosInterruptId; @@ -323,24 +333,6 @@ struct phm_clocks { uint32_t clock[MAX_NUM_CLOCKS]; }; -enum PP_DAL_POWERLEVEL { - PP_DAL_POWERLEVEL_INVALID = 0, - PP_DAL_POWERLEVEL_ULTRALOW, - PP_DAL_POWERLEVEL_LOW, - PP_DAL_POWERLEVEL_NOMINAL, - PP_DAL_POWERLEVEL_PERFORMANCE, - - PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW, - PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW, - PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL, - PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE, - PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1, - PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1, - PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1, - PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1, -}; - - extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr); extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate); extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate); @@ -375,11 +367,25 @@ extern int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, const struct amd_pp_display_configuration *display_config); extern int phm_get_dal_power_level(struct pp_hwmgr *hwmgr, - struct amd_pp_dal_clock_info*info); + struct amd_pp_simple_clock_info *info); extern int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr); extern int phm_power_down_asic(struct pp_hwmgr *hwmgr); +extern int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + PHM_PerformanceLevelDesignation designation, uint32_t index, + PHM_PerformanceLevel *level); + +extern int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + struct pp_clock_info *pclock_info, + PHM_PerformanceLevelDesignation designation); + +extern int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info); + +extern int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); + +extern int 
phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); + #endif /* _HARDWARE_MANAGER_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index aeaa3dbba525..928f5a740cba 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -325,8 +325,18 @@ struct pp_hwmgr_func { bool cc6_disable, bool pstate_disable, bool pstate_switch_disable); int (*get_dal_power_level)(struct pp_hwmgr *hwmgr, - struct amd_pp_dal_clock_info *info); + struct amd_pp_simple_clock_info *info); + int (*get_performance_level)(struct pp_hwmgr *, const struct pp_hw_power_state *, + PHM_PerformanceLevelDesignation, uint32_t, PHM_PerformanceLevel *); + int (*get_current_shallow_sleep_clocks)(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *state, struct pp_clock_info *clock_info); + int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); + int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); int (*power_off_asic)(struct pp_hwmgr *hwmgr); + int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table); + int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size); + int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, int level); + int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf); }; struct pp_table_func { diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 8b2becd1aa07..a5ff9458d359 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -229,6 +229,14 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) amd_sched_wakeup(entity->sched); } +static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb) +{ + struct amd_sched_entity *entity = + container_of(cb, struct amd_sched_entity, cb); + entity->dependency = NULL; + fence_put(f); +} + static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) { struct amd_gpu_scheduler *sched = entity->sched; @@ -251,7 +259,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) } /* Wait for fence to be scheduled */ - entity->cb.func = amd_sched_entity_wakeup; + entity->cb.func = amd_sched_entity_clear_dep; list_add_tail(&entity->cb.node, &s_fence->scheduled_cb); return true; } diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig new file mode 100644 index 000000000000..eaed454e043c --- /dev/null +++ b/drivers/gpu/drm/arm/Kconfig @@ -0,0 +1,27 @@ +config DRM_ARM + bool + help + Choose this option to select drivers for ARM's devices + +config DRM_HDLCD + tristate "ARM HDLCD" + depends on DRM && OF && (ARM || ARM64) + depends on COMMON_CLK + select DRM_ARM + select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER + select DRM_KMS_CMA_HELPER + help + Choose this option if you have an ARM High Definition Colour LCD + controller. + + If M is selected the module will be called hdlcd. + +config DRM_HDLCD_SHOW_UNDERRUN + bool "Show underrun conditions" + depends on DRM_HDLCD + default n + help + Enable this option to show in red colour the pixels that the + HDLCD device did not fetch from framebuffer due to underrun + conditions. 
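/*
 * Illustrative sketch of how the force_clock_level/print_clock_levels
 * callbacks declared above are meant to be driven.  The function below and
 * the amd_powerplay field names (pp_handle, pp_funcs) are assumptions; the
 * "<index>: <freq>Mhz" line format and the (1 << level) enable mask follow
 * the cz/fiji/tonga implementations in this patch.  Assumes <linux/slab.h>.
 */
static void pp_dump_and_force_sclk(struct amd_powerplay *amd_pp, int level)
{
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return;

	/* each line reads "<index>: <freq>Mhz", with a trailing '*' marking
	 * the currently selected level, e.g. "0: 300Mhz *" */
	if (amd_pp->pp_funcs->print_clock_levels)
		amd_pp->pp_funcs->print_clock_levels(amd_pp->pp_handle, PP_SCLK, buf);

	/* only honoured in AMD_DPM_FORCED_LEVEL_MANUAL; the backends turn the
	 * index into a single-bit enable mask */
	if (amd_pp->pp_funcs->force_clock_level)
		amd_pp->pp_funcs->force_clock_level(amd_pp->pp_handle, PP_SCLK, level);

	kfree(buf);
}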
diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile new file mode 100644 index 000000000000..89dcb7bab93a --- /dev/null +++ b/drivers/gpu/drm/arm/Makefile @@ -0,0 +1,2 @@ +hdlcd-y := hdlcd_drv.o hdlcd_crtc.o +obj-$(CONFIG_DRM_HDLCD) += hdlcd.o diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c new file mode 100644 index 000000000000..fef1b04c2aab --- /dev/null +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -0,0 +1,327 @@ +/* + * Copyright (C) 2013-2015 ARM Limited + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + * + * Implementation of a CRTC class for the HDLCD driver. + */ + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_of.h> +#include <drm/drm_plane_helper.h> +#include <linux/clk.h> +#include <linux/of_graph.h> +#include <linux/platform_data/simplefb.h> +#include <video/videomode.h> + +#include "hdlcd_drv.h" +#include "hdlcd_regs.h" + +/* + * The HDLCD controller is a dumb RGB streamer that gets connected to + * a single HDMI transmitter or in the case of the ARM Models it gets + * emulated by the software that does the actual rendering. + * + */ + +static const struct drm_crtc_funcs hdlcd_crtc_funcs = { + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static struct simplefb_format supported_formats[] = SIMPLEFB_FORMATS; + +/* + * Setup the HDLCD registers for decoding the pixels out of the framebuffer + */ +static int hdlcd_set_pxl_fmt(struct drm_crtc *crtc) +{ + unsigned int btpp; + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + uint32_t pixel_format; + struct simplefb_format *format = NULL; + int i; + + pixel_format = crtc->primary->state->fb->pixel_format; + + for (i = 0; i < ARRAY_SIZE(supported_formats); i++) { + if (supported_formats[i].fourcc == pixel_format) + format = &supported_formats[i]; + } + + if (WARN_ON(!format)) + return 0; + + /* HDLCD uses 'bytes per pixel', zero means 1 byte */ + btpp = (format->bits_per_pixel + 7) / 8; + hdlcd_write(hdlcd, HDLCD_REG_PIXEL_FORMAT, (btpp - 1) << 3); + + /* + * The format of the HDLCD_REG_<color>_SELECT register is: + * - bits[23:16] - default value for that color component + * - bits[11:8] - number of bits to extract for each color component + * - bits[4:0] - index of the lowest bit to extract + * + * The default color value is used when bits[11:8] are zero, when the + * pixel is outside the visible frame area or when there is a + * buffer underrun. 
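 *
 * Worked example (illustrative, assuming the usual XRGB8888 simplefb entry
 * with red.offset = 16 and red.length = 8): RED_SELECT becomes
 * 16 | (8 << 8) = 0x0810, and with DRM_HDLCD_SHOW_UNDERRUN enabled the
 * default component 0xff is also programmed, giving 0x00ff0810 so that
 * pixels missed on underrun show up as red.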
+ */ + hdlcd_write(hdlcd, HDLCD_REG_RED_SELECT, format->red.offset | +#ifdef CONFIG_DRM_HDLCD_SHOW_UNDERRUN + 0x00ff0000 | /* show underruns in red */ +#endif + ((format->red.length & 0xf) << 8)); + hdlcd_write(hdlcd, HDLCD_REG_GREEN_SELECT, format->green.offset | + ((format->green.length & 0xf) << 8)); + hdlcd_write(hdlcd, HDLCD_REG_BLUE_SELECT, format->blue.offset | + ((format->blue.length & 0xf) << 8)); + + return 0; +} + +static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + struct drm_display_mode *m = &crtc->state->adjusted_mode; + struct videomode vm; + unsigned int polarities, line_length, err; + + vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay; + vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end; + vm.vsync_len = m->crtc_vsync_end - m->crtc_vsync_start; + vm.hfront_porch = m->crtc_hsync_start - m->crtc_hdisplay; + vm.hback_porch = m->crtc_htotal - m->crtc_hsync_end; + vm.hsync_len = m->crtc_hsync_end - m->crtc_hsync_start; + + polarities = HDLCD_POLARITY_DATAEN | HDLCD_POLARITY_DATA; + + if (m->flags & DRM_MODE_FLAG_PHSYNC) + polarities |= HDLCD_POLARITY_HSYNC; + if (m->flags & DRM_MODE_FLAG_PVSYNC) + polarities |= HDLCD_POLARITY_VSYNC; + + line_length = crtc->primary->state->fb->pitches[0]; + + /* Allow max number of outstanding requests and largest burst size */ + hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS, + HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16); + + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length); + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length); + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1); + hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1); + hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1); + hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1); + hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1); + hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1); + hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1); + hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1); + hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1); + hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities); + + err = hdlcd_set_pxl_fmt(crtc); + if (err) + return; + + clk_set_rate(hdlcd->clk, m->crtc_clock * 1000); +} + +static void hdlcd_crtc_enable(struct drm_crtc *crtc) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + + clk_prepare_enable(hdlcd->clk); + hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); + drm_crtc_vblank_on(crtc); +} + +static void hdlcd_crtc_disable(struct drm_crtc *crtc) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + + if (!crtc->primary->fb) + return; + + clk_disable_unprepare(hdlcd->clk); + hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); + drm_crtc_vblank_off(crtc); +} + +static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + struct drm_display_mode *mode = &state->adjusted_mode; + long rate, clk_rate = mode->clock * 1000; + + rate = clk_round_rate(hdlcd->clk, clk_rate); + if (rate != clk_rate) { + /* clock required by mode not supported by hardware */ + return -EINVAL; + } + + return 0; +} + +static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + unsigned long flags; + + if (crtc->state->event) { + struct drm_pending_vblank_event *event = crtc->state->event; + + 
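		/*
		 * The pending event is detached from the CRTC state and
		 * queued on hdlcd->event_list; hdlcd_irq() completes it on
		 * the next VSYNC via drm_crtc_send_vblank_event() and drops
		 * the vblank reference taken below.
		 */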
crtc->state->event = NULL; + event->pipe = drm_crtc_index(crtc); + + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + list_add_tail(&event->base.link, &hdlcd->event_list); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } +} + +static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ +} + +static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { + .mode_fixup = hdlcd_crtc_mode_fixup, + .mode_set = drm_helper_crtc_mode_set, + .mode_set_base = drm_helper_crtc_mode_set_base, + .mode_set_nofb = hdlcd_crtc_mode_set_nofb, + .enable = hdlcd_crtc_enable, + .disable = hdlcd_crtc_disable, + .prepare = hdlcd_crtc_disable, + .commit = hdlcd_crtc_enable, + .atomic_check = hdlcd_crtc_atomic_check, + .atomic_begin = hdlcd_crtc_atomic_begin, + .atomic_flush = hdlcd_crtc_atomic_flush, +}; + +static int hdlcd_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + return 0; +} + +static void hdlcd_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct hdlcd_drm_private *hdlcd; + struct drm_gem_cma_object *gem; + dma_addr_t scanout_start; + + if (!plane->state->crtc || !plane->state->fb) + return; + + hdlcd = crtc_to_hdlcd_priv(plane->state->crtc); + gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0); + scanout_start = gem->paddr; + hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start); +} + +static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = { + .prepare_fb = NULL, + .cleanup_fb = NULL, + .atomic_check = hdlcd_plane_atomic_check, + .atomic_update = hdlcd_plane_atomic_update, +}; + +static void hdlcd_plane_destroy(struct drm_plane *plane) +{ + drm_plane_helper_disable(plane); + drm_plane_cleanup(plane); +} + +static const struct drm_plane_funcs hdlcd_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = hdlcd_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static struct drm_plane *hdlcd_plane_init(struct drm_device *drm) +{ + struct hdlcd_drm_private *hdlcd = drm->dev_private; + struct drm_plane *plane = NULL; + u32 formats[ARRAY_SIZE(supported_formats)], i; + int ret; + + plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL); + if (!plane) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < ARRAY_SIZE(supported_formats); i++) + formats[i] = supported_formats[i].fourcc; + + ret = drm_universal_plane_init(drm, plane, 0xff, &hdlcd_plane_funcs, + formats, ARRAY_SIZE(formats), + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) { + devm_kfree(drm->dev, plane); + return ERR_PTR(ret); + } + + drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs); + hdlcd->plane = plane; + + return plane; +} + +void hdlcd_crtc_suspend(struct drm_crtc *crtc) +{ + hdlcd_crtc_disable(crtc); +} + +void hdlcd_crtc_resume(struct drm_crtc *crtc) +{ + hdlcd_crtc_enable(crtc); +} + +int hdlcd_setup_crtc(struct drm_device *drm) +{ + struct hdlcd_drm_private *hdlcd = drm->dev_private; + struct drm_plane *primary; + int ret; + + primary = hdlcd_plane_init(drm); + if (IS_ERR(primary)) + return PTR_ERR(primary); + + ret = drm_crtc_init_with_planes(drm, &hdlcd->crtc, primary, NULL, + 
&hdlcd_crtc_funcs, NULL); + if (ret) { + hdlcd_plane_destroy(primary); + devm_kfree(drm->dev, primary); + return ret; + } + + drm_crtc_helper_add(&hdlcd->crtc, &hdlcd_crtc_helper_funcs); + return 0; +} diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c new file mode 100644 index 000000000000..56b829f97699 --- /dev/null +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -0,0 +1,550 @@ +/* + * Copyright (C) 2013-2015 ARM Limited + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + * + * ARM HDLCD Driver + */ + +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/list.h> +#include <linux/of_graph.h> +#include <linux/of_reserved_mem.h> +#include <linux/pm_runtime.h> + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_of.h> + +#include "hdlcd_drv.h" +#include "hdlcd_regs.h" + +static int hdlcd_load(struct drm_device *drm, unsigned long flags) +{ + struct hdlcd_drm_private *hdlcd = drm->dev_private; + struct platform_device *pdev = to_platform_device(drm->dev); + struct resource *res; + u32 version; + int ret; + + hdlcd->clk = devm_clk_get(drm->dev, "pxlclk"); + if (IS_ERR(hdlcd->clk)) + return PTR_ERR(hdlcd->clk); + +#ifdef CONFIG_DEBUG_FS + atomic_set(&hdlcd->buffer_underrun_count, 0); + atomic_set(&hdlcd->bus_error_count, 0); + atomic_set(&hdlcd->vsync_count, 0); + atomic_set(&hdlcd->dma_end_count, 0); +#endif + + INIT_LIST_HEAD(&hdlcd->event_list); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hdlcd->mmio = devm_ioremap_resource(drm->dev, res); + if (IS_ERR(hdlcd->mmio)) { + DRM_ERROR("failed to map control registers area\n"); + ret = PTR_ERR(hdlcd->mmio); + hdlcd->mmio = NULL; + goto fail; + } + + version = hdlcd_read(hdlcd, HDLCD_REG_VERSION); + if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) { + DRM_ERROR("unknown product id: 0x%x\n", version); + ret = -EINVAL; + goto fail; + } + DRM_INFO("found ARM HDLCD version r%dp%d\n", + (version & HDLCD_VERSION_MAJOR_MASK) >> 8, + version & HDLCD_VERSION_MINOR_MASK); + + /* Get the optional framebuffer memory resource */ + ret = of_reserved_mem_device_init(drm->dev); + if (ret && ret != -ENODEV) + goto fail; + + ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)); + if (ret) + goto setup_fail; + + ret = hdlcd_setup_crtc(drm); + if (ret < 0) { + DRM_ERROR("failed to create crtc\n"); + goto setup_fail; + } + + pm_runtime_enable(drm->dev); + + pm_runtime_get_sync(drm->dev); + ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); + pm_runtime_put_sync(drm->dev); + if (ret < 0) { + DRM_ERROR("failed to install IRQ handler\n"); + goto irq_fail; + } + + return 0; + +irq_fail: + drm_crtc_cleanup(&hdlcd->crtc); +setup_fail: + of_reserved_mem_device_release(drm->dev); +fail: + devm_clk_put(drm->dev, hdlcd->clk); + + return ret; +} + +static void hdlcd_fb_output_poll_changed(struct drm_device *drm) +{ + struct hdlcd_drm_private *hdlcd = drm->dev_private; + + if (hdlcd->fbdev) + drm_fbdev_cma_hotplug_event(hdlcd->fbdev); +} + +static int hdlcd_atomic_commit(struct drm_device *dev, + struct drm_atomic_state *state, bool async) +{ + return drm_atomic_helper_commit(dev, state, false); 
+} + +static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = { + .fb_create = drm_fb_cma_create, + .output_poll_changed = hdlcd_fb_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = hdlcd_atomic_commit, +}; + +static void hdlcd_setup_mode_config(struct drm_device *drm) +{ + drm_mode_config_init(drm); + drm->mode_config.min_width = 0; + drm->mode_config.min_height = 0; + drm->mode_config.max_width = HDLCD_MAX_XRES; + drm->mode_config.max_height = HDLCD_MAX_YRES; + drm->mode_config.funcs = &hdlcd_mode_config_funcs; +} + +static void hdlcd_lastclose(struct drm_device *drm) +{ + struct hdlcd_drm_private *hdlcd = drm->dev_private; + + drm_fbdev_cma_restore_mode(hdlcd->fbdev); +} + +static irqreturn_t hdlcd_irq(int irq, void *arg) +{ + struct drm_device *drm = arg; + struct hdlcd_drm_private *hdlcd = drm->dev_private; + unsigned long irq_status; + + irq_status = hdlcd_read(hdlcd, HDLCD_REG_INT_STATUS); + +#ifdef CONFIG_DEBUG_FS + if (irq_status & HDLCD_INTERRUPT_UNDERRUN) + atomic_inc(&hdlcd->buffer_underrun_count); + + if (irq_status & HDLCD_INTERRUPT_DMA_END) + atomic_inc(&hdlcd->dma_end_count); + + if (irq_status & HDLCD_INTERRUPT_BUS_ERROR) + atomic_inc(&hdlcd->bus_error_count); + + if (irq_status & HDLCD_INTERRUPT_VSYNC) + atomic_inc(&hdlcd->vsync_count); + +#endif + if (irq_status & HDLCD_INTERRUPT_VSYNC) { + bool events_sent = false; + unsigned long flags; + struct drm_pending_vblank_event *e, *t; + + drm_crtc_handle_vblank(&hdlcd->crtc); + + spin_lock_irqsave(&drm->event_lock, flags); + list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) { + list_del(&e->base.link); + drm_crtc_send_vblank_event(&hdlcd->crtc, e); + events_sent = true; + } + if (events_sent) + drm_crtc_vblank_put(&hdlcd->crtc); + spin_unlock_irqrestore(&drm->event_lock, flags); + } + + /* acknowledge interrupt(s) */ + hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status); + + return IRQ_HANDLED; +} + +static void hdlcd_irq_preinstall(struct drm_device *drm) +{ + struct hdlcd_drm_private *hdlcd = drm->dev_private; + /* Ensure interrupts are disabled */ + hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0); + hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, ~0); +} + +static int hdlcd_irq_postinstall(struct drm_device *drm) +{ +#ifdef CONFIG_DEBUG_FS + struct hdlcd_drm_private *hdlcd = drm->dev_private; + unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK); + + /* enable debug interrupts */ + irq_mask |= HDLCD_DEBUG_INT_MASK; + + hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask); +#endif + return 0; +} + +static void hdlcd_irq_uninstall(struct drm_device *drm) +{ + struct hdlcd_drm_private *hdlcd = drm->dev_private; + /* disable all the interrupts that we might have enabled */ + unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK); + +#ifdef CONFIG_DEBUG_FS + /* disable debug interrupts */ + irq_mask &= ~HDLCD_DEBUG_INT_MASK; +#endif + + /* disable vsync interrupts */ + irq_mask &= ~HDLCD_INTERRUPT_VSYNC; + + hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask); +} + +static int hdlcd_enable_vblank(struct drm_device *drm, unsigned int crtc) +{ + struct hdlcd_drm_private *hdlcd = drm->dev_private; + unsigned int mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK); + + hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, mask | HDLCD_INTERRUPT_VSYNC); + + return 0; +} + +static void hdlcd_disable_vblank(struct drm_device *drm, unsigned int crtc) +{ + struct hdlcd_drm_private *hdlcd = drm->dev_private; + unsigned int mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK); + + hdlcd_write(hdlcd, 
HDLCD_REG_INT_MASK, mask & ~HDLCD_INTERRUPT_VSYNC); +} + +#ifdef CONFIG_DEBUG_FS +static int hdlcd_show_underrun_count(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *drm = node->minor->dev; + struct hdlcd_drm_private *hdlcd = drm->dev_private; + + seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count)); + seq_printf(m, "dma_end : %d\n", atomic_read(&hdlcd->dma_end_count)); + seq_printf(m, "bus_error: %d\n", atomic_read(&hdlcd->bus_error_count)); + seq_printf(m, "vsync : %d\n", atomic_read(&hdlcd->vsync_count)); + return 0; +} + +static int hdlcd_show_pxlclock(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *drm = node->minor->dev; + struct hdlcd_drm_private *hdlcd = drm->dev_private; + unsigned long clkrate = clk_get_rate(hdlcd->clk); + unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000; + + seq_printf(m, "hw : %lu\n", clkrate); + seq_printf(m, "mode: %lu\n", mode_clock); + return 0; +} + +static struct drm_info_list hdlcd_debugfs_list[] = { + { "interrupt_count", hdlcd_show_underrun_count, 0 }, + { "clocks", hdlcd_show_pxlclock, 0 }, +}; + +static int hdlcd_debugfs_init(struct drm_minor *minor) +{ + return drm_debugfs_create_files(hdlcd_debugfs_list, + ARRAY_SIZE(hdlcd_debugfs_list), minor->debugfs_root, minor); +} + +static void hdlcd_debugfs_cleanup(struct drm_minor *minor) +{ + drm_debugfs_remove_files(hdlcd_debugfs_list, + ARRAY_SIZE(hdlcd_debugfs_list), minor); +} +#endif + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .poll = drm_poll, + .read = drm_read, + .llseek = noop_llseek, + .mmap = drm_gem_cma_mmap, +}; + +static struct drm_driver hdlcd_driver = { + .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | + DRIVER_MODESET | DRIVER_PRIME | + DRIVER_ATOMIC, + .lastclose = hdlcd_lastclose, + .irq_handler = hdlcd_irq, + .irq_preinstall = hdlcd_irq_preinstall, + .irq_postinstall = hdlcd_irq_postinstall, + .irq_uninstall = hdlcd_irq_uninstall, + .get_vblank_counter = drm_vblank_no_hw_counter, + .enable_vblank = hdlcd_enable_vblank, + .disable_vblank = hdlcd_disable_vblank, + .gem_free_object = drm_gem_cma_free_object, + .gem_vm_ops = &drm_gem_cma_vm_ops, + .dumb_create = drm_gem_cma_dumb_create, + .dumb_map_offset = drm_gem_cma_dumb_map_offset, + .dumb_destroy = drm_gem_dumb_destroy, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, + .gem_prime_vmap = drm_gem_cma_prime_vmap, + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, + .gem_prime_mmap = drm_gem_cma_prime_mmap, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = hdlcd_debugfs_init, + .debugfs_cleanup = hdlcd_debugfs_cleanup, +#endif + .fops = &fops, + .name = "hdlcd", + .desc = "ARM HDLCD Controller DRM", + .date = "20151021", + .major = 1, + .minor = 0, +}; + +static int hdlcd_drm_bind(struct device *dev) +{ + struct drm_device *drm; + struct hdlcd_drm_private *hdlcd; + int ret; + + hdlcd = devm_kzalloc(dev, sizeof(*hdlcd), GFP_KERNEL); + if (!hdlcd) + return -ENOMEM; + + drm = drm_dev_alloc(&hdlcd_driver, dev); + if (!drm) + return 
-ENOMEM; + + drm->dev_private = hdlcd; + hdlcd_setup_mode_config(drm); + ret = hdlcd_load(drm, 0); + if (ret) + goto err_free; + + ret = drm_dev_register(drm, 0); + if (ret) + goto err_unload; + + dev_set_drvdata(dev, drm); + + ret = component_bind_all(dev, drm); + if (ret) { + DRM_ERROR("Failed to bind all components\n"); + goto err_unregister; + } + + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (ret < 0) { + DRM_ERROR("failed to initialise vblank\n"); + goto err_vblank; + } + drm->vblank_disable_allowed = true; + + drm_mode_config_reset(drm); + drm_kms_helper_poll_init(drm); + + hdlcd->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc, + drm->mode_config.num_connector); + + if (IS_ERR(hdlcd->fbdev)) { + ret = PTR_ERR(hdlcd->fbdev); + hdlcd->fbdev = NULL; + goto err_fbdev; + } + + return 0; + +err_fbdev: + drm_kms_helper_poll_fini(drm); + drm_mode_config_cleanup(drm); + drm_vblank_cleanup(drm); +err_vblank: + component_unbind_all(dev, drm); +err_unregister: + drm_dev_unregister(drm); +err_unload: + pm_runtime_get_sync(drm->dev); + drm_irq_uninstall(drm); + pm_runtime_put_sync(drm->dev); + pm_runtime_disable(drm->dev); + of_reserved_mem_device_release(drm->dev); + devm_clk_put(dev, hdlcd->clk); +err_free: + drm_dev_unref(drm); + + return ret; +} + +static void hdlcd_drm_unbind(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct hdlcd_drm_private *hdlcd = drm->dev_private; + + if (hdlcd->fbdev) { + drm_fbdev_cma_fini(hdlcd->fbdev); + hdlcd->fbdev = NULL; + } + drm_kms_helper_poll_fini(drm); + component_unbind_all(dev, drm); + drm_vblank_cleanup(drm); + pm_runtime_get_sync(drm->dev); + drm_irq_uninstall(drm); + pm_runtime_put_sync(drm->dev); + pm_runtime_disable(drm->dev); + of_reserved_mem_device_release(drm->dev); + if (!IS_ERR(hdlcd->clk)) { + devm_clk_put(drm->dev, hdlcd->clk); + hdlcd->clk = NULL; + } + drm_mode_config_cleanup(drm); + drm_dev_unregister(drm); + drm_dev_unref(drm); + drm->dev_private = NULL; + dev_set_drvdata(dev, NULL); +} + +static const struct component_master_ops hdlcd_master_ops = { + .bind = hdlcd_drm_bind, + .unbind = hdlcd_drm_unbind, +}; + +static int compare_dev(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +static int hdlcd_probe(struct platform_device *pdev) +{ + struct device_node *port, *ep; + struct component_match *match = NULL; + + if (!pdev->dev.of_node) + return -ENODEV; + + /* there is only one output port inside each device, find it */ + ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL); + if (!ep) + return -ENODEV; + + if (!of_device_is_available(ep)) { + of_node_put(ep); + return -ENODEV; + } + + /* add the remote encoder port as component */ + port = of_graph_get_remote_port_parent(ep); + of_node_put(ep); + if (!port || !of_device_is_available(port)) { + of_node_put(port); + return -EAGAIN; + } + + component_match_add(&pdev->dev, &match, compare_dev, port); + + return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops, + match); +} + +static int hdlcd_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &hdlcd_master_ops); + return 0; +} + +static const struct of_device_id hdlcd_of_match[] = { + { .compatible = "arm,hdlcd" }, + {}, +}; +MODULE_DEVICE_TABLE(of, hdlcd_of_match); + +static int __maybe_unused hdlcd_pm_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct drm_crtc *crtc; + + if (pm_runtime_suspended(dev)) + return 0; + + drm_modeset_lock_all(drm); + list_for_each_entry(crtc, 
&drm->mode_config.crtc_list, head) + hdlcd_crtc_suspend(crtc); + drm_modeset_unlock_all(drm); + return 0; +} + +static int __maybe_unused hdlcd_pm_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct drm_crtc *crtc; + + if (!pm_runtime_suspended(dev)) + return 0; + + drm_modeset_lock_all(drm); + list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) + hdlcd_crtc_resume(crtc); + drm_modeset_unlock_all(drm); + return 0; +} + +static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume); + +static struct platform_driver hdlcd_platform_driver = { + .probe = hdlcd_probe, + .remove = hdlcd_remove, + .driver = { + .name = "hdlcd", + .pm = &hdlcd_pm_ops, + .of_match_table = hdlcd_of_match, + }, +}; + +module_platform_driver(hdlcd_platform_driver); + +MODULE_AUTHOR("Liviu Dudau"); +MODULE_DESCRIPTION("ARM HDLCD DRM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h new file mode 100644 index 000000000000..aa234784f053 --- /dev/null +++ b/drivers/gpu/drm/arm/hdlcd_drv.h @@ -0,0 +1,42 @@ +/* + * ARM HDLCD Controller register definition + */ + +#ifndef __HDLCD_DRV_H__ +#define __HDLCD_DRV_H__ + +struct hdlcd_drm_private { + void __iomem *mmio; + struct clk *clk; + struct drm_fbdev_cma *fbdev; + struct drm_framebuffer *fb; + struct list_head event_list; + struct drm_crtc crtc; + struct drm_plane *plane; +#ifdef CONFIG_DEBUG_FS + atomic_t buffer_underrun_count; + atomic_t bus_error_count; + atomic_t vsync_count; + atomic_t dma_end_count; +#endif +}; + +#define crtc_to_hdlcd_priv(x) container_of(x, struct hdlcd_drm_private, crtc) + +static inline void hdlcd_write(struct hdlcd_drm_private *hdlcd, + unsigned int reg, u32 value) +{ + writel(value, hdlcd->mmio + reg); +} + +static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg) +{ + return readl(hdlcd->mmio + reg); +} + +int hdlcd_setup_crtc(struct drm_device *dev); +void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd); +void hdlcd_crtc_suspend(struct drm_crtc *crtc); +void hdlcd_crtc_resume(struct drm_crtc *crtc); + +#endif /* __HDLCD_DRV_H__ */ diff --git a/drivers/gpu/drm/arm/hdlcd_regs.h b/drivers/gpu/drm/arm/hdlcd_regs.h new file mode 100644 index 000000000000..66799ebef6d3 --- /dev/null +++ b/drivers/gpu/drm/arm/hdlcd_regs.h @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2013,2014 ARM Limited + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. 
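hdlcd_drv.h above is the glue between the two source files: crtc_to_hdlcd_priv() recovers the driver state from a struct drm_crtc, and hdlcd_read()/hdlcd_write() are the only MMIO accessors. A hypothetical sketch (callback name invented for illustration) of how a CRTC hook would combine them with the HDLCD_REG_COMMAND definitions from hdlcd_regs.h below:

	/* Hypothetical callback, shown only to illustrate the accessors. */
	static void hdlcd_crtc_enable_sketch(struct drm_crtc *crtc)
	{
		struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);

		clk_prepare_enable(hdlcd->clk);
		hdlcd_write(hdlcd, HDLCD_REG_COMMAND, HDLCD_COMMAND_ENABLE);
		drm_crtc_vblank_on(crtc);
	}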
+ * + * ARM HDLCD Controller register definition + */ + +#ifndef __HDLCD_REGS_H__ +#define __HDLCD_REGS_H__ + +/* register offsets */ +#define HDLCD_REG_VERSION 0x0000 /* ro */ +#define HDLCD_REG_INT_RAWSTAT 0x0010 /* rw */ +#define HDLCD_REG_INT_CLEAR 0x0014 /* wo */ +#define HDLCD_REG_INT_MASK 0x0018 /* rw */ +#define HDLCD_REG_INT_STATUS 0x001c /* ro */ +#define HDLCD_REG_FB_BASE 0x0100 /* rw */ +#define HDLCD_REG_FB_LINE_LENGTH 0x0104 /* rw */ +#define HDLCD_REG_FB_LINE_COUNT 0x0108 /* rw */ +#define HDLCD_REG_FB_LINE_PITCH 0x010c /* rw */ +#define HDLCD_REG_BUS_OPTIONS 0x0110 /* rw */ +#define HDLCD_REG_V_SYNC 0x0200 /* rw */ +#define HDLCD_REG_V_BACK_PORCH 0x0204 /* rw */ +#define HDLCD_REG_V_DATA 0x0208 /* rw */ +#define HDLCD_REG_V_FRONT_PORCH 0x020c /* rw */ +#define HDLCD_REG_H_SYNC 0x0210 /* rw */ +#define HDLCD_REG_H_BACK_PORCH 0x0214 /* rw */ +#define HDLCD_REG_H_DATA 0x0218 /* rw */ +#define HDLCD_REG_H_FRONT_PORCH 0x021c /* rw */ +#define HDLCD_REG_POLARITIES 0x0220 /* rw */ +#define HDLCD_REG_COMMAND 0x0230 /* rw */ +#define HDLCD_REG_PIXEL_FORMAT 0x0240 /* rw */ +#define HDLCD_REG_RED_SELECT 0x0244 /* rw */ +#define HDLCD_REG_GREEN_SELECT 0x0248 /* rw */ +#define HDLCD_REG_BLUE_SELECT 0x024c /* rw */ + +/* version */ +#define HDLCD_PRODUCT_ID 0x1CDC0000 +#define HDLCD_PRODUCT_MASK 0xFFFF0000 +#define HDLCD_VERSION_MAJOR_MASK 0x0000FF00 +#define HDLCD_VERSION_MINOR_MASK 0x000000FF + +/* interrupts */ +#define HDLCD_INTERRUPT_DMA_END (1 << 0) +#define HDLCD_INTERRUPT_BUS_ERROR (1 << 1) +#define HDLCD_INTERRUPT_VSYNC (1 << 2) +#define HDLCD_INTERRUPT_UNDERRUN (1 << 3) +#define HDLCD_DEBUG_INT_MASK (HDLCD_INTERRUPT_DMA_END | \ + HDLCD_INTERRUPT_BUS_ERROR | \ + HDLCD_INTERRUPT_UNDERRUN) + +/* polarities */ +#define HDLCD_POLARITY_VSYNC (1 << 0) +#define HDLCD_POLARITY_HSYNC (1 << 1) +#define HDLCD_POLARITY_DATAEN (1 << 2) +#define HDLCD_POLARITY_DATA (1 << 3) +#define HDLCD_POLARITY_PIXELCLK (1 << 4) + +/* commands */ +#define HDLCD_COMMAND_DISABLE (0 << 0) +#define HDLCD_COMMAND_ENABLE (1 << 0) + +/* pixel format */ +#define HDLCD_PIXEL_FMT_LITTLE_ENDIAN (0 << 31) +#define HDLCD_PIXEL_FMT_BIG_ENDIAN (1 << 31) +#define HDLCD_BYTES_PER_PIXEL_MASK (3 << 3) + +/* bus options */ +#define HDLCD_BUS_BURST_MASK 0x01f +#define HDLCD_BUS_MAX_OUTSTAND 0xf00 +#define HDLCD_BUS_BURST_NONE (0 << 0) +#define HDLCD_BUS_BURST_1 (1 << 0) +#define HDLCD_BUS_BURST_2 (1 << 1) +#define HDLCD_BUS_BURST_4 (1 << 2) +#define HDLCD_BUS_BURST_8 (1 << 3) +#define HDLCD_BUS_BURST_16 (1 << 4) + +/* Max resolution supported is 4096x4096, 32bpp */ +#define HDLCD_MAX_XRES 4096 +#define HDLCD_MAX_YRES 4096 + +#define NR_PALETTE 256 + +#endif /* __HDLCD_REGS_H__ */ diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 3bd7e1cde99e..82043c204b76 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -188,9 +188,6 @@ static const struct file_operations armada_drm_fops = { static struct drm_driver armada_drm_driver = { .load = armada_drm_load, - .open = NULL, - .preclose = NULL, - .postclose = NULL, .lastclose = armada_drm_lastclose, .unload = armada_drm_unload, .set_busid = drm_platform_set_busid, diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 0123458cbd83..f221e2dc1b0d 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -710,13 +710,6 @@ static void ast_encoder_dpms(struct drm_encoder *encoder, int mode) } -static bool ast_mode_fixup(struct drm_encoder *encoder, - const 
struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void ast_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -736,7 +729,6 @@ static void ast_encoder_commit(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = { .dpms = ast_encoder_dpms, - .mode_fixup = ast_mode_fixup, .prepare = ast_encoder_prepare, .commit = ast_encoder_commit, .mode_set = ast_encoder_mode_set, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 468a14f266a7..9863291a9a54 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -280,24 +280,6 @@ static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c) kfree(crtc); } -void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *c, - struct drm_file *file) -{ - struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); - struct drm_pending_vblank_event *event; - struct drm_device *dev = c->dev; - unsigned long flags; - - spin_lock_irqsave(&dev->event_lock, flags); - event = crtc->event; - if (event && event->base.file_priv == file) { - event->base.destroy(&event->base); - drm_vblank_put(dev, crtc->id); - crtc->event = NULL; - } - spin_unlock_irqrestore(&dev->event_lock, flags); -} - static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc) { struct drm_device *dev = crtc->base.dev; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index a45b32ba029e..3d8d16402d07 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -619,15 +619,6 @@ static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev) mutex_unlock(&dev->mode_config.mutex); } -static void atmel_hlcdc_dc_preclose(struct drm_device *dev, - struct drm_file *file) -{ - struct drm_crtc *crtc; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) - atmel_hlcdc_crtc_cancel_page_flip(crtc, file); -} - static void atmel_hlcdc_dc_lastclose(struct drm_device *dev) { struct atmel_hlcdc_dc *dc = dev->dev_private; @@ -698,7 +689,6 @@ static struct drm_driver atmel_hlcdc_dc_driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, - .preclose = atmel_hlcdc_dc_preclose, .lastclose = atmel_hlcdc_dc_lastclose, .irq_handler = atmel_hlcdc_dc_irq_handler, .irq_preinstall = atmel_hlcdc_dc_irq_uninstall, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h index cf6b375bc38d..fed517f297da 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h @@ -152,9 +152,6 @@ int atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state); void atmel_hlcdc_crtc_irq(struct drm_crtc *c); -void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, - struct drm_file *file); - void atmel_hlcdc_crtc_suspend(struct drm_crtc *crtc); void atmel_hlcdc_crtc_resume(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 2849f1b95eec..317c27f2a50b 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -152,13 +152,6 @@ static void bochs_crtc_init(struct drm_device *dev) drm_crtc_helper_add(crtc, &bochs_helper_funcs); } -static bool bochs_encoder_mode_fixup(struct drm_encoder 
*encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void bochs_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -179,7 +172,6 @@ static void bochs_encoder_commit(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs bochs_encoder_helper_funcs = { .dpms = bochs_encoder_dpms, - .mode_fixup = bochs_encoder_mode_fixup, .mode_set = bochs_encoder_mode_set, .prepare = bochs_encoder_prepare, .commit = bochs_encoder_commit, diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c index b0aac4733020..9795b72472ba 100644 --- a/drivers/gpu/drm/bridge/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/dw-hdmi.c @@ -1391,13 +1391,6 @@ static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge, mutex_unlock(&hdmi->mutex); } -static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void dw_hdmi_bridge_disable(struct drm_bridge *bridge) { struct dw_hdmi *hdmi = bridge->driver_private; @@ -1546,7 +1539,6 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = { .pre_enable = dw_hdmi_bridge_nop, .post_disable = dw_hdmi_bridge_nop, .mode_set = dw_hdmi_bridge_mode_set, - .mode_fixup = dw_hdmi_bridge_mode_fixup, }; static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id) diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index 4a02854a6963..432ce9440e09 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -430,14 +430,6 @@ void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, *blue = cirrus_crtc->lut_b[regno]; } - -static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void cirrus_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -466,7 +458,6 @@ static void cirrus_encoder_destroy(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = { .dpms = cirrus_encoder_dpms, - .mode_fixup = cirrus_encoder_mode_fixup, .mode_set = cirrus_encoder_mode_set, .prepare = cirrus_encoder_prepare, .commit = cirrus_encoder_commit, diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 3f74193885f1..8fb469c4e4b8 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1347,44 +1347,23 @@ static struct drm_pending_vblank_event *create_vblank_event( struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data) { struct drm_pending_vblank_event *e = NULL; - unsigned long flags; - - spin_lock_irqsave(&dev->event_lock, flags); - if (file_priv->event_space < sizeof e->event) { - spin_unlock_irqrestore(&dev->event_lock, flags); - goto out; - } - file_priv->event_space -= sizeof e->event; - spin_unlock_irqrestore(&dev->event_lock, flags); + int ret; e = kzalloc(sizeof *e, GFP_KERNEL); - if (e == NULL) { - spin_lock_irqsave(&dev->event_lock, flags); - file_priv->event_space += sizeof e->event; - spin_unlock_irqrestore(&dev->event_lock, flags); - goto out; - } + if (!e) + return NULL; e->event.base.type = DRM_EVENT_FLIP_COMPLETE; - e->event.base.length = sizeof e->event; + e->event.base.length = sizeof(e->event); e->event.user_data = user_data; - 
e->base.event = &e->event.base; - e->base.file_priv = file_priv; - e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; - -out: - return e; -} -static void destroy_vblank_event(struct drm_device *dev, - struct drm_file *file_priv, struct drm_pending_vblank_event *e) -{ - unsigned long flags; + ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base); + if (ret) { + kfree(e); + return NULL; + } - spin_lock_irqsave(&dev->event_lock, flags); - file_priv->event_space += sizeof e->event; - spin_unlock_irqrestore(&dev->event_lock, flags); - kfree(e); + return e; } static int atomic_set_prop(struct drm_atomic_state *state, @@ -1646,8 +1625,7 @@ out: if (!crtc_state->event) continue; - destroy_vblank_event(dev, file_priv, - crtc_state->event); + drm_event_cancel_free(dev, &crtc_state->event->base); } } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 7c523060a076..2b430b05f35d 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -125,6 +125,47 @@ get_current_crtc_for_encoder(struct drm_device *dev, return NULL; } +static void +set_best_encoder(struct drm_atomic_state *state, + struct drm_connector_state *conn_state, + struct drm_encoder *encoder) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + + if (conn_state->best_encoder) { + /* Unset the encoder_mask in the old crtc state. */ + crtc = conn_state->connector->state->crtc; + + /* A NULL crtc is an error here because we should have + * duplicated a NULL best_encoder when crtc was NULL. + * As an exception restoring duplicated atomic state + * during resume is allowed, so don't warn when + * best_encoder is equal to encoder we intend to set. + */ + WARN_ON(!crtc && encoder != conn_state->best_encoder); + if (crtc) { + crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); + + crtc_state->encoder_mask &= + ~(1 << drm_encoder_index(conn_state->best_encoder)); + } + } + + if (encoder) { + crtc = conn_state->crtc; + WARN_ON(!crtc); + if (crtc) { + crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); + + crtc_state->encoder_mask |= + 1 << drm_encoder_index(encoder); + } + } + + conn_state->best_encoder = encoder; +} + static int steal_encoder(struct drm_atomic_state *state, struct drm_encoder *encoder, @@ -134,7 +175,6 @@ steal_encoder(struct drm_atomic_state *state, struct drm_crtc_state *crtc_state; struct drm_connector *connector; struct drm_connector_state *connector_state; - int ret; /* * We can only steal an encoder coming from a connector, which means we @@ -165,10 +205,10 @@ steal_encoder(struct drm_atomic_state *state, if (IS_ERR(connector_state)) return PTR_ERR(connector_state); - ret = drm_atomic_set_crtc_for_connector(connector_state, NULL); - if (ret) - return ret; - connector_state->best_encoder = NULL; + if (connector_state->best_encoder != encoder) + continue; + + set_best_encoder(state, connector_state, NULL); } return 0; @@ -216,7 +256,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) connector->base.id, connector->name); - connector_state->best_encoder = NULL; + set_best_encoder(state, connector_state, NULL); return 0; } @@ -245,6 +285,8 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } if (new_encoder == connector_state->best_encoder) { + set_best_encoder(state, connector_state, new_encoder); + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n", connector->base.id, connector->name, @@ -279,7 +321,8 @@ 
update_connector_routing(struct drm_atomic_state *state, int conn_idx) if (WARN_ON(!connector_state->crtc)) return -EINVAL; - connector_state->best_encoder = new_encoder; + set_best_encoder(state, connector_state, new_encoder); + idx = drm_crtc_index(connector_state->crtc); crtc_state = state->crtc_states[idx]; @@ -617,7 +660,6 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) for_each_connector_in_state(old_state, connector, old_conn_state, i) { const struct drm_encoder_helper_funcs *funcs; struct drm_encoder *encoder; - struct drm_crtc_state *old_crtc_state; /* Shut down everything that's in the changeset and currently * still on. So need to check the old, saved state. */ @@ -2549,8 +2591,10 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane) kfree(plane->state); plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL); - if (plane->state) + if (plane->state) { plane->state->plane = plane; + plane->state->rotation = BIT(DRM_ROTATE_0); + } } EXPORT_SYMBOL(drm_atomic_helper_plane_reset); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index d40bab29747e..65258acddb90 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1161,6 +1161,29 @@ out_unlock: EXPORT_SYMBOL(drm_encoder_init); /** + * drm_encoder_index - find the index of a registered encoder + * @encoder: encoder to find index for + * + * Given a registered encoder, return the index of that encoder within a DRM + * device's list of encoders. + */ +unsigned int drm_encoder_index(struct drm_encoder *encoder) +{ + unsigned int index = 0; + struct drm_encoder *tmp; + + drm_for_each_encoder(tmp, encoder->dev) { + if (tmp == encoder) + return index; + + index++; + } + + BUG(); +} +EXPORT_SYMBOL(drm_encoder_index); + +/** * drm_encoder_cleanup - cleans up an initialised encoder * @encoder: encoder to cleanup * @@ -5265,7 +5288,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, struct drm_crtc *crtc; struct drm_framebuffer *fb = NULL; struct drm_pending_vblank_event *e = NULL; - unsigned long flags; int ret = -EINVAL; if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || @@ -5316,41 +5338,26 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, } if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { - ret = -ENOMEM; - spin_lock_irqsave(&dev->event_lock, flags); - if (file_priv->event_space < sizeof(e->event)) { - spin_unlock_irqrestore(&dev->event_lock, flags); - goto out; - } - file_priv->event_space -= sizeof(e->event); - spin_unlock_irqrestore(&dev->event_lock, flags); - - e = kzalloc(sizeof(*e), GFP_KERNEL); - if (e == NULL) { - spin_lock_irqsave(&dev->event_lock, flags); - file_priv->event_space += sizeof(e->event); - spin_unlock_irqrestore(&dev->event_lock, flags); + e = kzalloc(sizeof *e, GFP_KERNEL); + if (!e) { + ret = -ENOMEM; goto out; } - e->event.base.type = DRM_EVENT_FLIP_COMPLETE; e->event.base.length = sizeof(e->event); e->event.user_data = page_flip->user_data; - e->base.event = &e->event.base; - e->base.file_priv = file_priv; - e->base.destroy = - (void (*) (struct drm_pending_event *)) kfree; + ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base); + if (ret) { + kfree(e); + goto out; + } } crtc->primary->old_fb = crtc->primary->fb; ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags); if (ret) { - if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { - spin_lock_irqsave(&dev->event_lock, flags); - file_priv->event_space += sizeof(e->event); - spin_unlock_irqrestore(&dev->event_lock, flags); - kfree(e); - } + if 
(page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) + drm_event_cancel_free(dev, &e->base); /* Keep the old fb, don't unref it. */ crtc->primary->old_fb = NULL; } else { @@ -5731,6 +5738,48 @@ int drm_format_vert_chroma_subsampling(uint32_t format) EXPORT_SYMBOL(drm_format_vert_chroma_subsampling); /** + * drm_format_plane_width - width of the plane given the first plane + * @width: width of the first plane + * @format: pixel format + * @plane: plane index + * + * Returns: + * The width of @plane, given that the width of the first plane is @width. + */ +int drm_format_plane_width(int width, uint32_t format, int plane) +{ + if (plane >= drm_format_num_planes(format)) + return 0; + + if (plane == 0) + return width; + + return width / drm_format_horz_chroma_subsampling(format); +} +EXPORT_SYMBOL(drm_format_plane_width); + +/** + * drm_format_plane_height - height of the plane given the first plane + * @height: height of the first plane + * @format: pixel format + * @plane: plane index + * + * Returns: + * The height of @plane, given that the height of the first plane is @height. + */ +int drm_format_plane_height(int height, uint32_t format, int plane) +{ + if (plane >= drm_format_num_planes(format)) + return 0; + + if (plane == 0) + return height; + + return height / drm_format_vert_chroma_subsampling(format); +} +EXPORT_SYMBOL(drm_format_plane_height); + +/** * drm_rotation_simplify() - Try to simplify the rotation * @rotation: Rotation to be simplified * @supported_rotations: Supported rotations diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index a02a7f9a6a9d..7539eea4ccbc 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -73,9 +73,6 @@ * &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct * &drm_connector_helper_funcs. */ -MODULE_AUTHOR("David Airlie, Jesse Barnes"); -MODULE_DESCRIPTION("DRM KMS helper"); -MODULE_LICENSE("GPL and additional rights"); /** * drm_helper_move_panel_connectors_to_head() - move panels to the front in the @@ -220,6 +217,15 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev) * disconnected connectors. Then it will disable all unused encoders and CRTCs * either by calling their disable callback if available or by calling their * dpms callback with DRM_MODE_DPMS_OFF. + * + * NOTE: + * + * This function is part of the legacy modeset helper library and will cause + * major confusion with atomic drivers. This is because atomic helpers guarantee + * to never call ->disable() hooks on a disabled function, or ->enable() hooks + * on an enabled functions. drm_helper_disable_unused_functions() on the other + * hand throws such guarantees into the wind and calls disable hooks + * unconditionally on unused functions. 
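The new drm_format_plane_width()/drm_format_plane_height() helpers above return the first plane's size unchanged and divide by the chroma subsampling factors for every other plane. As a sketch, for a 1920x1080 NV12 framebuffer (2x2 chroma subsampling) they would give:

	/* Illustration only. */
	static void example_nv12_plane_sizes(void)
	{
		int luma_w   = drm_format_plane_width(1920, DRM_FORMAT_NV12, 0);  /* 1920 */
		int chroma_w = drm_format_plane_width(1920, DRM_FORMAT_NV12, 1);  /*  960 */
		int chroma_h = drm_format_plane_height(1080, DRM_FORMAT_NV12, 1); /*  540 */

		DRM_DEBUG_KMS("luma %d wide, chroma plane %dx%d\n",
			      luma_w, chroma_w, chroma_h);
	}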
*/ void drm_helper_disable_unused_functions(struct drm_device *dev) { @@ -328,16 +334,21 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, } encoder_funcs = encoder->helper_private; - if (!(ret = encoder_funcs->mode_fixup(encoder, mode, - adjusted_mode))) { - DRM_DEBUG_KMS("Encoder fixup failed\n"); - goto done; + if (encoder_funcs->mode_fixup) { + if (!(ret = encoder_funcs->mode_fixup(encoder, mode, + adjusted_mode))) { + DRM_DEBUG_KMS("Encoder fixup failed\n"); + goto done; + } } } - if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) { - DRM_DEBUG_KMS("CRTC fixup failed\n"); - goto done; + if (crtc_funcs->mode_fixup) { + if (!(ret = crtc_funcs->mode_fixup(crtc, mode, + adjusted_mode))) { + DRM_DEBUG_KMS("CRTC fixup failed\n"); + goto done; + } } DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); @@ -578,8 +589,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) if (set->crtc->primary->fb == NULL) { DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); mode_changed = true; - } else if (set->fb == NULL) { - mode_changed = true; } else if (set->fb->pixel_format != set->crtc->primary->fb->pixel_format) { mode_changed = true; @@ -590,7 +599,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) if (set->x != set->crtc->x || set->y != set->crtc->y) fb_changed = true; - if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { + if (!drm_mode_equal(set->mode, &set->crtc->mode)) { DRM_DEBUG_KMS("modes are different, full mode set\n"); drm_mode_debug_printmodeline(&set->crtc->mode); drm_mode_debug_printmodeline(set->mode); diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c new file mode 100644 index 000000000000..f73b38b33a8e --- /dev/null +++ b/drivers/gpu/drm/drm_dp_aux_dev.c @@ -0,0 +1,368 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
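The drm_crtc_helper.c hunk above makes both the encoder and CRTC ->mode_fixup() callbacks optional, which is what lets the dummy pass-through implementations in ast, bochs, cirrus and dw-hdmi be deleted in this series. A driver with nothing to adjust can now simply leave the hook out of its helper table; a sketch with made-up example_* callback names:

	static const struct drm_encoder_helper_funcs example_encoder_helper_funcs = {
		.dpms     = example_encoder_dpms,
		.prepare  = example_encoder_prepare,
		.commit   = example_encoder_commit,
		.mode_set = example_encoder_mode_set,
		/* no .mode_fixup: the helpers now treat a NULL hook as "no fixup" */
	};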
+ * + * Authors: + * Rafael Antognolli <rafael.antognolli@intel.com> + * + */ + +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/uaccess.h> +#include <drm/drm_dp_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drmP.h> + +struct drm_dp_aux_dev { + unsigned index; + struct drm_dp_aux *aux; + struct device *dev; + struct kref refcount; + atomic_t usecount; +}; + +#define DRM_AUX_MINORS 256 +#define AUX_MAX_OFFSET (1 << 20) +static DEFINE_IDR(aux_idr); +static DEFINE_MUTEX(aux_idr_mutex); +static struct class *drm_dp_aux_dev_class; +static int drm_dev_major = -1; + +static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index) +{ + struct drm_dp_aux_dev *aux_dev = NULL; + + mutex_lock(&aux_idr_mutex); + aux_dev = idr_find(&aux_idr, index); + if (!kref_get_unless_zero(&aux_dev->refcount)) + aux_dev = NULL; + mutex_unlock(&aux_idr_mutex); + + return aux_dev; +} + +static struct drm_dp_aux_dev *alloc_drm_dp_aux_dev(struct drm_dp_aux *aux) +{ + struct drm_dp_aux_dev *aux_dev; + int index; + + aux_dev = kzalloc(sizeof(*aux_dev), GFP_KERNEL); + if (!aux_dev) + return ERR_PTR(-ENOMEM); + aux_dev->aux = aux; + atomic_set(&aux_dev->usecount, 1); + kref_init(&aux_dev->refcount); + + mutex_lock(&aux_idr_mutex); + index = idr_alloc_cyclic(&aux_idr, aux_dev, 0, DRM_AUX_MINORS, + GFP_KERNEL); + mutex_unlock(&aux_idr_mutex); + if (index < 0) { + kfree(aux_dev); + return ERR_PTR(index); + } + aux_dev->index = index; + + return aux_dev; +} + +static void release_drm_dp_aux_dev(struct kref *ref) +{ + struct drm_dp_aux_dev *aux_dev = + container_of(ref, struct drm_dp_aux_dev, refcount); + + kfree(aux_dev); +} + +static ssize_t name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t res; + struct drm_dp_aux_dev *aux_dev = + drm_dp_aux_dev_get_by_minor(MINOR(dev->devt)); + + if (!aux_dev) + return -ENODEV; + + res = sprintf(buf, "%s\n", aux_dev->aux->name); + kref_put(&aux_dev->refcount, release_drm_dp_aux_dev); + + return res; +} +static DEVICE_ATTR_RO(name); + +static struct attribute *drm_dp_aux_attrs[] = { + &dev_attr_name.attr, + NULL, +}; +ATTRIBUTE_GROUPS(drm_dp_aux); + +static int auxdev_open(struct inode *inode, struct file *file) +{ + unsigned int minor = iminor(inode); + struct drm_dp_aux_dev *aux_dev; + + aux_dev = drm_dp_aux_dev_get_by_minor(minor); + if (!aux_dev) + return -ENODEV; + + file->private_data = aux_dev; + return 0; +} + +static loff_t auxdev_llseek(struct file *file, loff_t offset, int whence) +{ + return fixed_size_llseek(file, offset, whence, AUX_MAX_OFFSET); +} + +static ssize_t auxdev_read(struct file *file, char __user *buf, size_t count, + loff_t *offset) +{ + size_t bytes_pending, num_bytes_processed = 0; + struct drm_dp_aux_dev *aux_dev = file->private_data; + ssize_t res = 0; + + if (!atomic_inc_not_zero(&aux_dev->usecount)) + return -ENODEV; + + bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - (*offset)); + + if (!access_ok(VERIFY_WRITE, buf, bytes_pending)) { + res = -EFAULT; + goto out; + } + + while (bytes_pending > 0) { + uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES]; + ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf)); + + res = drm_dp_dpcd_read(aux_dev->aux, *offset, localbuf, todo); + if (res <= 0) { + res = num_bytes_processed ? num_bytes_processed : res; + goto out; + } + if (__copy_to_user(buf + num_bytes_processed, localbuf, res)) { + res = num_bytes_processed ? 
+ num_bytes_processed : -EFAULT; + goto out; + } + bytes_pending -= res; + *offset += res; + num_bytes_processed += res; + res = num_bytes_processed; + } + +out: + atomic_dec(&aux_dev->usecount); + wake_up_atomic_t(&aux_dev->usecount); + return res; +} + +static ssize_t auxdev_write(struct file *file, const char __user *buf, + size_t count, loff_t *offset) +{ + size_t bytes_pending, num_bytes_processed = 0; + struct drm_dp_aux_dev *aux_dev = file->private_data; + ssize_t res = 0; + + if (!atomic_inc_not_zero(&aux_dev->usecount)) + return -ENODEV; + + bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - *offset); + + if (!access_ok(VERIFY_READ, buf, bytes_pending)) { + res = -EFAULT; + goto out; + } + + while (bytes_pending > 0) { + uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES]; + ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf)); + + if (__copy_from_user(localbuf, + buf + num_bytes_processed, todo)) { + res = num_bytes_processed ? + num_bytes_processed : -EFAULT; + goto out; + } + + res = drm_dp_dpcd_write(aux_dev->aux, *offset, localbuf, todo); + if (res <= 0) { + res = num_bytes_processed ? num_bytes_processed : res; + goto out; + } + bytes_pending -= res; + *offset += res; + num_bytes_processed += res; + res = num_bytes_processed; + } + +out: + atomic_dec(&aux_dev->usecount); + wake_up_atomic_t(&aux_dev->usecount); + return res; +} + +static int auxdev_release(struct inode *inode, struct file *file) +{ + struct drm_dp_aux_dev *aux_dev = file->private_data; + + kref_put(&aux_dev->refcount, release_drm_dp_aux_dev); + return 0; +} + +static const struct file_operations auxdev_fops = { + .owner = THIS_MODULE, + .llseek = auxdev_llseek, + .read = auxdev_read, + .write = auxdev_write, + .open = auxdev_open, + .release = auxdev_release, +}; + +#define to_auxdev(d) container_of(d, struct drm_dp_aux_dev, aux) + +static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_aux(struct drm_dp_aux *aux) +{ + struct drm_dp_aux_dev *iter, *aux_dev = NULL; + int id; + + /* don't increase kref count here because this function should only be + * used by drm_dp_aux_unregister_devnode. Thus, it will always have at + * least one reference - the one that drm_dp_aux_register_devnode + * created + */ + mutex_lock(&aux_idr_mutex); + idr_for_each_entry(&aux_idr, iter, id) { + if (iter->aux == aux) { + aux_dev = iter; + break; + } + } + mutex_unlock(&aux_idr_mutex); + return aux_dev; +} + +static int auxdev_wait_atomic_t(atomic_t *p) +{ + schedule(); + return 0; +} +/** + * drm_dp_aux_unregister_devnode() - unregister a devnode for this aux channel + * @aux: DisplayPort AUX channel + * + * Returns 0 on success or a negative error code on failure. 
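The character device implemented above exposes the DPCD address space directly: the file offset is the DPCD address and transfers are chunked into DP_AUX_MAX_PAYLOAD_BYTES reads/writes. A hedged userspace sketch, assuming the node came up as /dev/drm_dp_aux0 following the "drm_dp_aux%d" naming used by drm_dp_aux_register_devnode() below (DPCD address 0x000 holds the DPCD revision):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint8_t dpcd_rev;
		int fd = open("/dev/drm_dp_aux0", O_RDONLY);

		if (fd < 0)
			return 1;
		/* The file offset is the DPCD address; 0x000 is DPCD_REV. */
		if (pread(fd, &dpcd_rev, 1, 0x000) == 1)
			printf("DPCD %u.%u\n", dpcd_rev >> 4, dpcd_rev & 0xf);
		close(fd);
		return 0;
	}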
+ */ +void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux) +{ + struct drm_dp_aux_dev *aux_dev; + unsigned int minor; + + aux_dev = drm_dp_aux_dev_get_by_aux(aux); + if (!aux_dev) /* attach must have failed */ + return; + + mutex_lock(&aux_idr_mutex); + idr_remove(&aux_idr, aux_dev->index); + mutex_unlock(&aux_idr_mutex); + + atomic_dec(&aux_dev->usecount); + wait_on_atomic_t(&aux_dev->usecount, auxdev_wait_atomic_t, + TASK_UNINTERRUPTIBLE); + + minor = aux_dev->index; + if (aux_dev->dev) + device_destroy(drm_dp_aux_dev_class, + MKDEV(drm_dev_major, minor)); + + DRM_DEBUG("drm_dp_aux_dev: aux [%s] unregistering\n", aux->name); + kref_put(&aux_dev->refcount, release_drm_dp_aux_dev); +} +EXPORT_SYMBOL(drm_dp_aux_unregister_devnode); + +/** + * drm_dp_aux_register_devnode() - register a devnode for this aux channel + * @aux: DisplayPort AUX channel + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_aux_register_devnode(struct drm_dp_aux *aux) +{ + struct drm_dp_aux_dev *aux_dev; + int res; + + aux_dev = alloc_drm_dp_aux_dev(aux); + if (IS_ERR(aux_dev)) + return PTR_ERR(aux_dev); + + aux_dev->dev = device_create(drm_dp_aux_dev_class, aux->dev, + MKDEV(drm_dev_major, aux_dev->index), NULL, + "drm_dp_aux%d", aux_dev->index); + if (IS_ERR(aux_dev->dev)) { + res = PTR_ERR(aux_dev->dev); + aux_dev->dev = NULL; + goto error; + } + + DRM_DEBUG("drm_dp_aux_dev: aux [%s] registered as minor %d\n", + aux->name, aux_dev->index); + return 0; +error: + drm_dp_aux_unregister_devnode(aux); + return res; +} +EXPORT_SYMBOL(drm_dp_aux_register_devnode); + +int drm_dp_aux_dev_init(void) +{ + int res; + + drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev"); + if (IS_ERR(drm_dp_aux_dev_class)) { + res = PTR_ERR(drm_dp_aux_dev_class); + goto out; + } + drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups; + + res = register_chrdev(0, "aux", &auxdev_fops); + if (res < 0) + goto out; + drm_dev_major = res; + + return 0; +out: + class_destroy(drm_dp_aux_dev_class); + return res; +} +EXPORT_SYMBOL(drm_dp_aux_dev_init); + +void drm_dp_aux_dev_exit(void) +{ + unregister_chrdev(drm_dev_major, "aux"); + class_destroy(drm_dp_aux_dev_class); +} +EXPORT_SYMBOL(drm_dp_aux_dev_exit); diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 9535c5b60387..7d58f594cffe 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -28,6 +28,7 @@ #include <linux/sched.h> #include <linux/i2c.h> #include <drm/drm_dp_helper.h> +#include <drm/drm_dp_aux_dev.h> #include <drm/drmP.h> /** @@ -754,6 +755,8 @@ static const struct i2c_algorithm drm_dp_i2c_algo = { */ int drm_dp_aux_register(struct drm_dp_aux *aux) { + int ret; + mutex_init(&aux->hw_mutex); aux->ddc.algo = &drm_dp_i2c_algo; @@ -768,7 +771,17 @@ int drm_dp_aux_register(struct drm_dp_aux *aux) strlcpy(aux->ddc.name, aux->name ? 
aux->name : dev_name(aux->dev), sizeof(aux->ddc.name)); - return i2c_add_adapter(&aux->ddc); + ret = drm_dp_aux_register_devnode(aux); + if (ret) + return ret; + + ret = i2c_add_adapter(&aux->ddc); + if (ret) { + drm_dp_aux_unregister_devnode(aux); + return ret; + } + + return 0; } EXPORT_SYMBOL(drm_dp_aux_register); @@ -778,6 +791,7 @@ EXPORT_SYMBOL(drm_dp_aux_register); */ void drm_dp_aux_unregister(struct drm_dp_aux *aux) { + drm_dp_aux_unregister_devnode(aux); i2c_del_adapter(&aux->ddc); } EXPORT_SYMBOL(drm_dp_aux_unregister); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 04cb4877fabd..fdb1eb014586 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -32,6 +32,7 @@ #include <linux/hdmi.h> #include <linux/i2c.h> #include <linux/module.h> +#include <linux/vga_switcheroo.h> #include <drm/drmP.h> #include <drm/drm_edid.h> #include <drm/drm_displayid.h> @@ -1395,6 +1396,31 @@ struct edid *drm_get_edid(struct drm_connector *connector, EXPORT_SYMBOL(drm_get_edid); /** + * drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output + * @connector: connector we're probing + * @adapter: I2C adapter to use for DDC + * + * Wrapper around drm_get_edid() for laptops with dual GPUs using one set of + * outputs. The wrapper adds the requisite vga_switcheroo calls to temporarily + * switch DDC to the GPU which is retrieving EDID. + * + * Return: Pointer to valid EDID or %NULL if we couldn't find any. + */ +struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, + struct i2c_adapter *adapter) +{ + struct pci_dev *pdev = connector->dev->pdev; + struct edid *edid; + + vga_switcheroo_lock_ddc(pdev); + edid = drm_get_edid(connector, adapter); + vga_switcheroo_unlock_ddc(pdev); + + return edid; +} +EXPORT_SYMBOL(drm_get_edid_switcheroo); + +/** * drm_edid_duplicate - duplicate an EDID and the extensions * @edid: EDID to duplicate * diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c index e8629076de32..4484785cd9ac 100644 --- a/drivers/gpu/drm/drm_encoder_slave.c +++ b/drivers/gpu/drm/drm_encoder_slave.c @@ -140,6 +140,9 @@ bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + if (!get_slave_funcs(encoder)->mode_fixup) + return true; + return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode); } EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup); diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index c895b6fddbd8..bb88e3df9257 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -74,7 +74,8 @@ static struct drm_framebuffer_funcs drm_fb_cma_funcs = { }; static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev, - const const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_cma_object **obj, unsigned int num_planes) { struct drm_fb_cma *fb_cma; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 1e103c4c6ee0..855108e6e1bd 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -104,21 +104,17 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; struct drm_connector *connector; - int i; + int i, ret; if (!drm_fbdev_emulation) return 0; mutex_lock(&dev->mode_config.mutex); drm_for_each_connector(connector, dev) { - 
struct drm_fb_helper_connector *fb_helper_connector; + ret = drm_fb_helper_add_one_connector(fb_helper, connector); - fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL); - if (!fb_helper_connector) + if (ret) goto fail; - - fb_helper_connector->connector = connector; - fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector; } mutex_unlock(&dev->mode_config.mutex); return 0; @@ -130,7 +126,7 @@ fail: fb_helper->connector_count = 0; mutex_unlock(&dev->mode_config.mutex); - return -ENOMEM; + return ret; } EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); @@ -1989,13 +1985,13 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) width = dev->mode_config.max_width; height = dev->mode_config.max_height; - crtcs = kcalloc(dev->mode_config.num_connector, + crtcs = kcalloc(fb_helper->connector_count, sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); - modes = kcalloc(dev->mode_config.num_connector, + modes = kcalloc(fb_helper->connector_count, sizeof(struct drm_display_mode *), GFP_KERNEL); - offsets = kcalloc(dev->mode_config.num_connector, + offsets = kcalloc(fb_helper->connector_count, sizeof(struct drm_fb_offset), GFP_KERNEL); - enabled = kcalloc(dev->mode_config.num_connector, + enabled = kcalloc(fb_helper->connector_count, sizeof(bool), GFP_KERNEL); if (!crtcs || !modes || !enabled || !offsets) { DRM_ERROR("Memory allocation failed\n"); @@ -2009,9 +2005,9 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) fb_helper->funcs->initial_config(fb_helper, crtcs, modes, offsets, enabled, width, height))) { - memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0])); - memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0])); - memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0])); + memset(modes, 0, fb_helper->connector_count*sizeof(modes[0])); + memset(crtcs, 0, fb_helper->connector_count*sizeof(crtcs[0])); + memset(offsets, 0, fb_helper->connector_count*sizeof(offsets[0])); if (!drm_target_cloned(fb_helper, modes, offsets, enabled, width, height) && @@ -2091,6 +2087,27 @@ out: * drm_fb_helper_fill_fix() are provided as helpers to setup simple default * values for the fbdev info structure. * + * HANG DEBUGGING: + * + * When you have fbcon support built-in or already loaded, this function will do + * a full modeset to setup the fbdev console. Due to locking misdesign in the + * VT/fbdev subsystem that entire modeset sequence has to be done while holding + * console_lock. Until console_unlock is called no dmesg lines will be sent out + * to consoles, not even serial console. This means when your driver crashes, + * you will see absolutely nothing else but a system stuck in this function, + * with no further output. Any kind of printk() you place within your own driver + * or in the drm core modeset code will also never show up. + * + * Standard debug practice is to run the fbcon setup without taking the + * console_lock as a hack, to be able to see backtraces and crashes on the + * serial line. This can be done by setting the fb.lockless_register_fb=1 kernel + * cmdline option. + * + * The other option is to just disable fbdev emulation since very likely the + * first modest from userspace will crash in the same way, and is even easier to + * debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0 + * kernel cmdline option. + * * RETURNS: * Zero if everything went ok, nonzero otherwise. 
*/ @@ -2175,9 +2192,9 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event); * but the module doesn't depend on any fb console symbols. At least * attempt to load fbcon to avoid leaving the system without a usable console. */ -#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT) -static int __init drm_fb_helper_modinit(void) +int __init drm_fb_helper_modinit(void) { +#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT) const char *name = "fbcon"; struct module *fbcon; @@ -2187,8 +2204,7 @@ static int __init drm_fb_helper_modinit(void) if (!fbcon) request_module_nowait(name); +#endif return 0; } - -module_init(drm_fb_helper_modinit); -#endif +EXPORT_SYMBOL(drm_fb_helper_modinit); diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 1ea8790e5090..aeef58ed359b 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -1,4 +1,4 @@ -/** +/* * \file drm_fops.c * File operations for DRM * @@ -44,6 +44,46 @@ /* from BKL pushdown */ DEFINE_MUTEX(drm_global_mutex); +/** + * DOC: file operations + * + * Drivers must define the file operations structure that forms the DRM + * userspace API entry point, even though most of those operations are + * implemented in the DRM core. The mandatory functions are drm_open(), + * drm_read(), drm_ioctl() and drm_compat_ioctl if CONFIG_COMPAT is enabled. + * Drivers which implement private ioctls that require 32/64 bit compatibility + * support must provided their onw .compat_ioctl() handler that processes + * private ioctls and calls drm_compat_ioctl() for core ioctls. + * + * In addition drm_read() and drm_poll() provide support for DRM events. DRM + * events are a generic and extensible means to send asynchronous events to + * userspace through the file descriptor. They are used to send vblank event and + * page flip completions by the KMS API. But drivers can also use it for their + * own needs, e.g. to signal completion of rendering. + * + * The memory mapping implementation will vary depending on how the driver + * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap() + * function, modern drivers should use one of the provided memory-manager + * specific implementations. For GEM-based drivers this is drm_gem_mmap(). + * + * No other file operations are supported by the DRM userspace API. Overall the + * following is an example #file_operations structure: + * + * static const example_drm_fops = { + * .owner = THIS_MODULE, + * .open = drm_open, + * .release = drm_release, + * .unlocked_ioctl = drm_ioctl, + * #ifdef CONFIG_COMPAT + * .compat_ioctl = drm_compat_ioctl, + * #endif + * .poll = drm_poll, + * .read = drm_read, + * .llseek = no_llseek, + * .mmap = drm_gem_mmap, + * }; + */ + static int drm_open_helper(struct file *filp, struct drm_minor *minor); static int drm_setup(struct drm_device * dev) @@ -67,15 +107,17 @@ static int drm_setup(struct drm_device * dev) } /** - * Open file. + * drm_open - open method for DRM file + * @inode: device inode + * @filp: file pointer. * - * \param inode device inode - * \param filp file pointer. - * \return zero on success or a negative number on failure. + * This function must be used by drivers as their .open() #file_operations + * method. It looks up the correct DRM device and instantiates all the per-file + * resources for it. + * + * RETURNS: * - * Searches the DRM device with the same minor number, calls open_helper(), and - * increments the device open count. 
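The new DOC comment above notes that drivers with private ioctls needing 32/64-bit compatibility must supply their own .compat_ioctl() handler and forward core ioctls to drm_compat_ioctl(). A sketch of that pattern with hypothetical example_* names, relying on the fact that driver-private ioctl numbers start at DRM_COMMAND_BASE:

	static long example_compat_ioctl(struct file *filp, unsigned int cmd,
					 unsigned long arg)
	{
		if (_IOC_NR(cmd) >= DRM_COMMAND_BASE)
			return example_private_compat_ioctl(filp, cmd, arg); /* hypothetical */

		return drm_compat_ioctl(filp, cmd, arg);
	}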
If the open count was previous at zero, - * i.e., it's the first that the device is open, then calls setup(). + * 0 on success or negative errno value on falure. */ int drm_open(struct inode *inode, struct file *filp) { @@ -112,7 +154,7 @@ err_undo: } EXPORT_SYMBOL(drm_open); -/** +/* * Check whether DRI will run on this CPU. * * \return non-zero if the DRI will run on this CPU, or zero otherwise. @@ -125,7 +167,7 @@ static int drm_cpu_valid(void) return 1; } -/** +/* * drm_new_set_master - Allocate a new master object and become master for the * associated master realm. * @@ -179,7 +221,7 @@ out_err: return ret; } -/** +/* * Called whenever a process opens /dev/drm. * * \param filp file pointer. @@ -222,6 +264,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) INIT_LIST_HEAD(&priv->fbs); mutex_init(&priv->fbs_lock); INIT_LIST_HEAD(&priv->blobs); + INIT_LIST_HEAD(&priv->pending_event_list); INIT_LIST_HEAD(&priv->event_list); init_waitqueue_head(&priv->event_wait); priv->event_space = 4096; /* set aside 4k for event buffer */ @@ -311,18 +354,16 @@ static void drm_events_release(struct drm_file *file_priv) { struct drm_device *dev = file_priv->minor->dev; struct drm_pending_event *e, *et; - struct drm_pending_vblank_event *v, *vt; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); - /* Remove pending flips */ - list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link) - if (v->base.file_priv == file_priv) { - list_del(&v->base.link); - drm_vblank_put(dev, v->pipe); - v->base.destroy(&v->base); - } + /* Unlink pending events */ + list_for_each_entry_safe(e, et, &file_priv->pending_event_list, + pending_link) { + list_del(&e->pending_link); + e->file_priv = NULL; + } /* Remove unconsumed events */ list_for_each_entry_safe(e, et, &file_priv->event_list, link) { @@ -333,7 +374,7 @@ static void drm_events_release(struct drm_file *file_priv) spin_unlock_irqrestore(&dev->event_lock, flags); } -/** +/* * drm_legacy_dev_reinit * * Reinitializes a legacy/ums drm device in it's lastclose function. @@ -350,7 +391,7 @@ static void drm_legacy_dev_reinit(struct drm_device *dev) dev->if_version = 0; } -/** +/* * Take down the DRM device. * * \param dev DRM device structure. @@ -387,16 +428,17 @@ int drm_lastclose(struct drm_device * dev) } /** - * Release file. + * drm_release - release method for DRM file + * @inode: device inode + * @filp: file pointer. * - * \param inode device inode - * \param file_priv DRM file private. - * \return zero on success or a negative number on failure. + * This function must be used by drivers as their .release() #file_operations + * method. It frees any resources associated with the open file, and if this is + * the last open file for the DRM device also proceeds to call drm_lastclose(). * - * If the hardware lock is held then free it, and take it again for the kernel - * context since it's necessary to reclaim buffers. Unlink the file private - * data from its list and free it. Decreases the open count and if it reaches - * zero calls drm_lastclose(). + * RETURNS: + * + * Always succeeds and returns 0. */ int drm_release(struct inode *inode, struct file *filp) { @@ -451,7 +493,7 @@ int drm_release(struct inode *inode, struct file *filp) if (file_priv->is_master) { struct drm_master *master = file_priv->master; - /** + /* * Since the master is disappearing, so is the * possibility to lock. 
*/ @@ -508,6 +550,32 @@ int drm_release(struct inode *inode, struct file *filp) } EXPORT_SYMBOL(drm_release); +/** + * drm_read - read method for DRM file + * @filp: file pointer + * @buffer: userspace destination pointer for the read + * @count: count in bytes to read + * @offset: offset to read + * + * This function must be used by drivers as their .read() #file_operations + * method iff they use DRM events for asynchronous signalling to userspace. + * Since events are used by the KMS API for vblank and page flip completion this + * means all modern display drivers must use it. + * + * @offset is ignore, DRM events are read like a pipe. Therefore drivers also + * must set the .llseek() #file_operation to no_llseek(). Polling support is + * provided by drm_poll(). + * + * This function will only ever read a full event. Therefore userspace must + * supply a big enough buffer to fit any event to ensure forward progress. Since + * the maximum event space is currently 4K it's recommended to just use that for + * safety. + * + * RETURNS: + * + * Number of bytes read (always aligned to full events, and can be 0) or a + * negative error code on failure. + */ ssize_t drm_read(struct file *filp, char __user *buffer, size_t count, loff_t *offset) { @@ -578,6 +646,22 @@ put_back_event: } EXPORT_SYMBOL(drm_read); +/** + * drm_poll - poll method for DRM file + * @filp: file pointer + * @wait: poll waiter table + * + * This function must be used by drivers as their .read() #file_operations + * method iff they use DRM events for asynchronous signalling to userspace. + * Since events are used by the KMS API for vblank and page flip completion this + * means all modern display drivers must use it. + * + * See also drm_read(). + * + * RETURNS: + * + * Mask of POLL flags indicating the current status of the file. + */ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) { struct drm_file *file_priv = filp->private_data; @@ -591,3 +675,164 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) return mask; } EXPORT_SYMBOL(drm_poll); + +/** + * drm_event_reserve_init_locked - init a DRM event and reserve space for it + * @dev: DRM device + * @file_priv: DRM file private data + * @p: tracking structure for the pending event + * @e: actual event data to deliver to userspace + * + * This function prepares the passed in event for eventual delivery. If the event + * doesn't get delivered (because the IOCTL fails later on, before queuing up + * anything) then the even must be cancelled and freed using + * drm_event_cancel_free(). Successfully initialized events should be sent out + * using drm_send_event() or drm_send_event_locked() to signal completion of the + * asynchronous event to userspace. + * + * If callers embedded @p into a larger structure it must be allocated with + * kmalloc and @p must be the first member element. + * + * This is the locked version of drm_event_reserve_init() for callers which + * already hold dev->event_lock. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. 
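The event helpers documented here replace the open-coded event_space accounting that the drm_atomic.c, drm_crtc.c and drm_irq.c hunks above delete. Mirroring the converted create_vblank_event(), a typical driver-side lifecycle looks roughly like this (example_start_hw_op() is a hypothetical stand-in for whatever asynchronous work the driver queues):

	static int example_queue_event(struct drm_device *dev,
				       struct drm_file *file_priv, u64 user_data)
	{
		struct drm_pending_vblank_event *e;
		int ret;

		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return -ENOMEM;

		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
		e->event.base.length = sizeof(e->event);
		e->event.user_data = user_data;

		ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
		if (ret) {
			kfree(e);
			return ret;
		}

		ret = example_start_hw_op(e);	/* hypothetical */
		if (ret)
			drm_event_cancel_free(dev, &e->base);
		/* on success, the completion handler calls drm_send_event(dev, &e->base) */

		return ret;
	}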
+ */
+int drm_event_reserve_init_locked(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  struct drm_pending_event *p,
+				  struct drm_event *e)
+{
+	if (file_priv->event_space < e->length)
+		return -ENOMEM;
+
+	file_priv->event_space -= e->length;
+
+	p->event = e;
+	list_add(&p->pending_link, &file_priv->pending_event_list);
+	p->file_priv = file_priv;
+
+	/* we *could* pass this in as arg, but everyone uses kfree: */
+	p->destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_event_reserve_init_locked);
+
+/**
+ * drm_event_reserve_init - init a DRM event and reserve space for it
+ * @dev: DRM device
+ * @file_priv: DRM file private data
+ * @p: tracking structure for the pending event
+ * @e: actual event data to deliver to userspace
+ *
+ * This function prepares the passed in event for eventual delivery. If the event
+ * doesn't get delivered (because the IOCTL fails later on, before queuing up
+ * anything) then the event must be cancelled and freed using
+ * drm_event_cancel_free(). Successfully initialized events should be sent out
+ * using drm_send_event() or drm_send_event_locked() to signal completion of the
+ * asynchronous event to userspace.
+ *
+ * If callers embed @p into a larger structure it must be allocated with
+ * kmalloc and @p must be the first member element.
+ *
+ * Callers which already hold dev->event_lock should use
+ * drm_event_reserve_init_locked() instead.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+int drm_event_reserve_init(struct drm_device *dev,
+			   struct drm_file *file_priv,
+			   struct drm_pending_event *p,
+			   struct drm_event *e)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_event_reserve_init);
+
+/**
+ * drm_event_cancel_free - free a DRM event and release its space
+ * @dev: DRM device
+ * @p: tracking structure for the pending event
+ *
+ * This function frees the event @p initialized with drm_event_reserve_init()
+ * and releases any allocated space.
+ */
+void drm_event_cancel_free(struct drm_device *dev,
+			   struct drm_pending_event *p)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (p->file_priv) {
+		p->file_priv->event_space += p->event->length;
+		list_del(&p->pending_link);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	p->destroy(p);
+}
+EXPORT_SYMBOL(drm_event_cancel_free);
+
+/**
+ * drm_send_event_locked - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. Callers must already hold
+ * dev->event_lock; see drm_send_event() for the unlocked version.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
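Taken together, drm_event_reserve_init(), drm_event_cancel_free() and drm_send_event()/drm_send_event_locked() (whose body follows) give drivers one fixed pattern for userspace events: reserve while the ioctl runs, cancel on error, send on completion. A rough driver-side sketch (hypothetical foo_* names and event type, not from this patch; the vblank and exynos g2d conversions further down are the real in-tree users):

#include <drm/drmP.h>
#include <linux/slab.h>

#define FOO_EVENT_DONE	0x80000001		/* hypothetical driver-private type */

struct foo_pending {
	struct drm_pending_event base;		/* must be the first member */
	struct drm_event event;
};

static int foo_start_hw(struct drm_device *dev, struct foo_pending *p);	/* hypothetical */

/* ioctl path: reserve the event up front, cancel it if the ioctl bails out */
static int foo_submit(struct drm_device *dev, struct drm_file *file_priv)
{
	struct foo_pending *p;
	int ret;

	p = kzalloc(sizeof(*p), GFP_KERNEL);	/* @p must be kmalloc'ed memory */
	if (!p)
		return -ENOMEM;

	p->event.type = FOO_EVENT_DONE;
	p->event.length = sizeof(p->event);

	ret = drm_event_reserve_init(dev, file_priv, &p->base, &p->event);
	if (ret) {
		kfree(p);			/* not yet tracked by the core */
		return ret;
	}

	ret = foo_start_hw(dev, p);
	if (ret) {
		drm_event_cancel_free(dev, &p->base);	/* releases space and frees @p */
		return ret;
	}

	return 0;
}

/* completion path: safe even if the DRM file was closed in the meantime */
static void foo_hw_done(struct drm_device *dev, struct foo_pending *p)
{
	drm_send_event(dev, &p->base);	/* or drm_send_event_locked() under dev->event_lock */
}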
+ */ +void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) +{ + assert_spin_locked(&dev->event_lock); + + if (!e->file_priv) { + e->destroy(e); + return; + } + + list_del(&e->pending_link); + list_add_tail(&e->link, + &e->file_priv->event_list); + wake_up_interruptible(&e->file_priv->event_wait); +} +EXPORT_SYMBOL(drm_send_event_locked); + +/** + * drm_send_event - send DRM event to file descriptor + * @dev: DRM device + * @e: DRM event to deliver + * + * This function sends the event @e, initialized with drm_event_reserve_init(), + * to its associated userspace DRM file. This function acquires dev->event_lock, + * see drm_send_event_locked() for callers which already hold this lock. + * + * Note that the core will take care of unlinking and disarming events when the + * corresponding DRM file is closed. Drivers need not worry about whether the + * DRM file for this event still exists and can call this function upon + * completion of the asynchronous work unconditionally. + */ +void drm_send_event(struct drm_device *dev, struct drm_pending_event *e) +{ + unsigned long irqflags; + + spin_lock_irqsave(&dev->event_lock, irqflags); + drm_send_event_locked(dev, e); + spin_unlock_irqrestore(&dev->event_lock, irqflags); +} +EXPORT_SYMBOL(drm_send_event); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index d12a4efa651b..96d03ac38ef7 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -983,15 +983,12 @@ static void send_vblank_event(struct drm_device *dev, struct drm_pending_vblank_event *e, unsigned long seq, struct timeval *now) { - assert_spin_locked(&dev->event_lock); - e->event.sequence = seq; e->event.tv_sec = now->tv_sec; e->event.tv_usec = now->tv_usec; - list_add_tail(&e->base.link, - &e->base.file_priv->event_list); - wake_up_interruptible(&e->base.file_priv->event_wait); + drm_send_event_locked(dev, &e->base); + trace_drm_vblank_event_delivered(e->base.pid, e->pipe, e->event.sequence); } @@ -1601,9 +1598,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, e->event.base.type = DRM_EVENT_VBLANK; e->event.base.length = sizeof(e->event); e->event.user_data = vblwait->request.signal; - e->base.event = &e->event.base; - e->base.file_priv = file_priv; - e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; spin_lock_irqsave(&dev->event_lock, flags); @@ -1619,12 +1613,12 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, goto err_unlock; } - if (file_priv->event_space < sizeof(e->event)) { - ret = -EBUSY; + ret = drm_event_reserve_init_locked(dev, file_priv, &e->base, + &e->event.base); + + if (ret) goto err_unlock; - } - file_priv->event_space -= sizeof(e->event); seq = drm_vblank_count_and_time(dev, pipe, &now); if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) && diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c new file mode 100644 index 000000000000..3187c4bb01cb --- /dev/null +++ b/drivers/gpu/drm/drm_kms_helper_common.c @@ -0,0 +1,60 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject 
to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Rafael Antognolli <rafael.antognolli@intel.com> + * + */ + +#include <drm/drmP.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_dp_aux_dev.h> + +MODULE_AUTHOR("David Airlie, Jesse Barnes"); +MODULE_DESCRIPTION("DRM KMS helper"); +MODULE_LICENSE("GPL and additional rights"); + +static int __init drm_kms_helper_init(void) +{ + int ret; + + /* Call init functions from specific kms helpers here */ + ret = drm_fb_helper_modinit(); + if (ret < 0) + goto out; + + ret = drm_dp_aux_dev_init(); + if (ret < 0) + goto out; + +out: + return ret; +} + +static void __exit drm_kms_helper_exit(void) +{ + /* Call exit functions from specific kms helpers here */ + drm_dp_aux_dev_exit(); +} + +module_init(drm_kms_helper_init); +module_exit(drm_kms_helper_exit); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 20775c05235a..f7448a5e95a9 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1371,8 +1371,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, } done: if (i >= 0) { - printk(KERN_WARNING - "parse error at position %i in video mode '%s'\n", + pr_warn("[drm] parse error at position %i in video mode '%s'\n", i, name); mode->specified = false; return false; diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index 493c05c9ce4f..bc98bb94264d 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -149,3 +149,37 @@ int drm_of_component_probe(struct device *dev, return component_master_add_with_match(dev, m_ops, match); } EXPORT_SYMBOL(drm_of_component_probe); + +/* + * drm_of_encoder_active_endpoint - return the active encoder endpoint + * @node: device tree node containing encoder input ports + * @encoder: drm_encoder + * + * Given an encoder device node and a drm_encoder with a connected crtc, + * parse the encoder endpoint connecting to the crtc port. 
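drm_of_encoder_active_endpoint() (implemented just below) is aimed at encoder drivers that need to know which of their device-tree input ports is wired to the active CRTC, typically from an enable or mode_set hook. A sketch of such a caller, assuming hypothetical foo_* names and the helper's declaration living in <drm/drm_of.h>:

#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>

struct foo_encoder {
	struct drm_encoder encoder;
	struct device *dev;
};

static void foo_hw_select_input(struct foo_encoder *foo, unsigned int port);	/* hypothetical */

static void foo_encoder_enable(struct drm_encoder *encoder)
{
	struct foo_encoder *foo = container_of(encoder, struct foo_encoder,
					       encoder);
	struct of_endpoint endpoint;
	int ret;

	/* which of our DT input ports is connected to encoder->crtc? */
	ret = drm_of_encoder_active_endpoint(foo->dev->of_node, encoder,
					     &endpoint);
	if (ret < 0)
		return;

	foo_hw_select_input(foo, endpoint.port);	/* program the input mux */
}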
+ */ +int drm_of_encoder_active_endpoint(struct device_node *node, + struct drm_encoder *encoder, + struct of_endpoint *endpoint) +{ + struct device_node *ep; + struct drm_crtc *crtc = encoder->crtc; + struct device_node *port; + int ret; + + if (!node || !crtc) + return -EINVAL; + + for_each_endpoint_of_node(node, ep) { + port = of_graph_get_remote_port(ep); + of_node_put(port); + if (port == crtc->port) { + ret = of_graph_parse_endpoint(ep, endpoint); + of_node_put(ep); + return ret; + } + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 27aa7183b20b..df6cdc76a16e 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -329,7 +329,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { * drm_gem_prime_export - helper library implementation of the export callback * @dev: drm_device to export from * @obj: GEM object to export - * @flags: flags like DRM_CLOEXEC + * @flags: flags like DRM_CLOEXEC and DRM_RDWR * * This is the implementation of the gem_prime_export functions for GEM drivers * using the PRIME helpers. @@ -628,7 +628,6 @@ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_prime_handle *args = data; - uint32_t flags; if (!drm_core_check_feature(dev, DRIVER_PRIME)) return -EINVAL; @@ -637,14 +636,11 @@ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, return -ENOSYS; /* check flags are valid */ - if (args->flags & ~DRM_CLOEXEC) + if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR)) return -EINVAL; - /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */ - flags = args->flags & DRM_CLOEXEC; - return dev->driver->prime_handle_to_fd(dev, file_priv, - args->handle, flags, &args->fd); + args->handle, args->flags, &args->fd); } int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 83efca941388..f17d39279596 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -1,6 +1,6 @@ config DRM_EXYNOS tristate "DRM Support for Samsung SoC EXYNOS Series" - depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) + depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM) select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select FB_CFB_FILLRECT diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile index 6496532aaa91..968b31c522b2 100644 --- a/drivers/gpu/drm/exynos/Makefile +++ b/drivers/gpu/drm/exynos/Makefile @@ -2,7 +2,6 @@ # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
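With drm_prime_handle_to_fd_ioctl() above now passing DRM_RDWR through to the driver, userspace can ask for a dma-buf fd that is mappable for writing. A small userspace sketch (hypothetical foo_* wrapper; assumes libdrm plus kernel uapi headers that already carry the DRM_RDWR flag, error handling kept minimal):

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <xf86drm.h>		/* libdrm; pulls in the drm.h uapi definitions */

int foo_export_writable(int drm_fd, uint32_t gem_handle)
{
	struct drm_prime_handle args = {
		.handle = gem_handle,
		.flags = DRM_CLOEXEC | DRM_RDWR,	/* DRM_RDWR is what this hunk starts accepting */
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
		return -errno;

	return args.fd;		/* dma-buf fd that may now be mmap()ed for writing */
}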
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \ exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \ exynos_drm_plane.o diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 1bf6a21130c7..5245bc5e82e9 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -93,7 +93,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc) if (test_bit(BIT_SUSPENDED, &ctx->flags)) return -EPERM; - if (test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) { + if (!test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) { val = VIDINTCON0_INTEN; if (ctx->out_type == IFTYPE_I80) val |= VIDINTCON0_FRAMEDONE; @@ -402,8 +402,6 @@ static void decon_enable(struct exynos_drm_crtc *crtc) decon_enable_vblank(ctx->crtc); decon_commit(ctx->crtc); - - set_bit(BIT_SUSPENDED, &ctx->flags); } static void decon_disable(struct exynos_drm_crtc *crtc) @@ -431,7 +429,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc) set_bit(BIT_SUSPENDED, &ctx->flags); } -void decon_te_irq_handler(struct exynos_drm_crtc *crtc) +static void decon_te_irq_handler(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -582,9 +580,9 @@ out: static int exynos5433_decon_suspend(struct device *dev) { struct decon_context *ctx = dev_get_drvdata(dev); - int i; + int i = ARRAY_SIZE(decon_clks_name); - for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) + while (--i >= 0) clk_disable_unprepare(ctx->clks[i]); return 0; diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 52bda3b42fe0..93361073af9a 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -60,7 +60,6 @@ struct decon_context { wait_queue_head_t wait_vsync_queue; atomic_t wait_vsync_event; - struct exynos_drm_panel_info panel; struct drm_encoder *encoder; }; diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index 673164b331c8..cff8dc788820 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -977,9 +977,7 @@ static int exynos_dp_get_modes(struct drm_connector *connector) return 0; } - drm_display_mode_from_videomode(&dp->priv.vm, mode); - mode->width_mm = dp->priv.width_mm; - mode->height_mm = dp->priv.height_mm; + drm_display_mode_from_videomode(&dp->vm, mode); connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; @@ -1155,13 +1153,6 @@ static int exynos_dp_create_connector(struct drm_encoder *encoder) return 0; } -static bool exynos_dp_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void exynos_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -1177,7 +1168,6 @@ static void exynos_dp_disable(struct drm_encoder *encoder) } static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = { - .mode_fixup = exynos_dp_mode_fixup, .mode_set = exynos_dp_mode_set, .enable = exynos_dp_enable, .disable = exynos_dp_disable, @@ -1249,8 +1239,7 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp) { int ret; - ret = of_get_videomode(dp->dev->of_node, &dp->priv.vm, - OF_USE_NATIVE_MODE); + ret = of_get_videomode(dp->dev->of_node, &dp->vm, 
OF_USE_NATIVE_MODE); if (ret) { DRM_ERROR("failed: of_get_videomode() : %d\n", ret); return ret; diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h index 66eec4b2d5c6..b5c2d8f47f9c 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.h +++ b/drivers/gpu/drm/exynos/exynos_dp_core.h @@ -16,6 +16,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_dp_helper.h> #include <drm/exynos_drm.h> +#include <video/videomode.h> #include "exynos_drm_drv.h" @@ -164,8 +165,7 @@ struct exynos_dp_device { struct phy *phy; int dpms_mode; int hpd_gpio; - - struct exynos_drm_panel_info priv; + struct videomode vm; }; /* exynos_dp_reg.c */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index 05350ae0785b..75e570f45259 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -128,13 +128,6 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder) return 0; } -static bool exynos_dpi_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void exynos_dpi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -162,7 +155,6 @@ static void exynos_dpi_disable(struct drm_encoder *encoder) } static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = { - .mode_fixup = exynos_dpi_mode_fixup, .mode_set = exynos_dpi_mode_set, .enable = exynos_dpi_enable, .disable = exynos_dpi_disable, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 68f0f36f6e7e..0dcfa04830ea 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -130,6 +130,8 @@ static void exynos_drm_atomic_work(struct work_struct *work) exynos_atomic_commit_complete(commit); } +static struct device *exynos_drm_get_dma_device(void); + static int exynos_drm_load(struct drm_device *dev, unsigned long flags) { struct exynos_drm_private *private; @@ -147,6 +149,16 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) dev_set_drvdata(dev->dev, dev); dev->dev_private = (void *)private; + /* the first real CRTC device is used for all dma mapping operations */ + private->dma_dev = exynos_drm_get_dma_device(); + if (!private->dma_dev) { + DRM_ERROR("no device found for DMA mapping operations.\n"); + ret = -ENODEV; + goto err_free_private; + } + DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n", + dev_name(private->dma_dev)); + /* * create mapping to manage iommu table and set a pointer to iommu * mapping structure to iommu_mapping of private data. @@ -340,20 +352,6 @@ static void exynos_drm_preclose(struct drm_device *dev, static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) { - struct drm_pending_event *e, *et; - unsigned long flags; - - if (!file->driver_priv) - return; - - spin_lock_irqsave(&dev->event_lock, flags); - /* Release all events handled by page flip handler but not freed. 
*/ - list_for_each_entry_safe(e, et, &file->event_list, link) { - list_del(&e->link); - e->destroy(e); - } - spin_unlock_irqrestore(&dev->event_lock, flags); - kfree(file->driver_priv); file->driver_priv = NULL; } @@ -495,69 +493,65 @@ static const struct dev_pm_ops exynos_drm_pm_ops = { /* forward declaration */ static struct platform_driver exynos_drm_platform_driver; +struct exynos_drm_driver_info { + struct platform_driver *driver; + unsigned int flags; +}; + +#define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */ +#define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */ +#define DRM_DMA_DEVICE BIT(2) /* can be used for dma allocations */ + +#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL) + /* * Connector drivers should not be placed before associated crtc drivers, * because connector requires pipe number of its crtc during initialization. */ -static struct platform_driver *const exynos_drm_kms_drivers[] = { -#ifdef CONFIG_DRM_EXYNOS_FIMD - &fimd_driver, -#endif -#ifdef CONFIG_DRM_EXYNOS5433_DECON - &exynos5433_decon_driver, -#endif -#ifdef CONFIG_DRM_EXYNOS7_DECON - &decon_driver, -#endif -#ifdef CONFIG_DRM_EXYNOS_MIC - &mic_driver, -#endif -#ifdef CONFIG_DRM_EXYNOS_DP - &dp_driver, -#endif -#ifdef CONFIG_DRM_EXYNOS_DSI - &dsi_driver, -#endif -#ifdef CONFIG_DRM_EXYNOS_MIXER - &mixer_driver, -#endif -#ifdef CONFIG_DRM_EXYNOS_HDMI - &hdmi_driver, -#endif -#ifdef CONFIG_DRM_EXYNOS_VIDI - &vidi_driver, -#endif -}; - -static struct platform_driver *const exynos_drm_non_kms_drivers[] = { -#ifdef CONFIG_DRM_EXYNOS_G2D - &g2d_driver, -#endif -#ifdef CONFIG_DRM_EXYNOS_FIMC - &fimc_driver, -#endif -#ifdef CONFIG_DRM_EXYNOS_ROTATOR - &rotator_driver, -#endif -#ifdef CONFIG_DRM_EXYNOS_GSC - &gsc_driver, -#endif -#ifdef CONFIG_DRM_EXYNOS_IPP - &ipp_driver, -#endif - &exynos_drm_platform_driver, -}; - -static struct platform_driver *const exynos_drm_drv_with_simple_dev[] = { -#ifdef CONFIG_DRM_EXYNOS_VIDI - &vidi_driver, -#endif -#ifdef CONFIG_DRM_EXYNOS_IPP - &ipp_driver, -#endif - &exynos_drm_platform_driver, +static struct exynos_drm_driver_info exynos_drm_drivers[] = { + { + DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD), + DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE + }, { + DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON), + DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE + }, { + DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON), + DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE + }, { + DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER), + DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE + }, { + DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC), + DRM_COMPONENT_DRIVER + }, { + DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP), + DRM_COMPONENT_DRIVER + }, { + DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI), + DRM_COMPONENT_DRIVER + }, { + DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI), + DRM_COMPONENT_DRIVER + }, { + DRV_PTR(vidi_driver, CONFIG_DRM_EXYNOS_VIDI), + DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE + }, { + DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D), + }, { + DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC), + }, { + DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR), + }, { + DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC), + }, { + DRV_PTR(ipp_driver, CONFIG_DRM_EXYNOS_IPP), + DRM_VIRTUAL_DEVICE + }, { + &exynos_drm_platform_driver, + DRM_VIRTUAL_DEVICE + } }; -#define PDEV_COUNT ARRAY_SIZE(exynos_drm_drv_with_simple_dev) static int compare_dev(struct device *dev, void *data) { @@ -569,11 +563,15 @@ static struct component_match *exynos_drm_match_add(struct device *dev) struct component_match *match = 
NULL; int i; - for (i = 0; i < ARRAY_SIZE(exynos_drm_kms_drivers); ++i) { - struct device_driver *drv = &exynos_drm_kms_drivers[i]->driver; + for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) { + struct exynos_drm_driver_info *info = &exynos_drm_drivers[i]; struct device *p = NULL, *d; - while ((d = bus_find_device(&platform_bus_type, p, drv, + if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER)) + continue; + + while ((d = bus_find_device(&platform_bus_type, p, + &info->driver->driver, (void *)platform_bus_type.match))) { put_device(p); component_match_add(dev, &match, compare_dev, d); @@ -630,91 +628,102 @@ static struct platform_driver exynos_drm_platform_driver = { }, }; -static struct platform_device *exynos_drm_pdevs[PDEV_COUNT]; +static struct device *exynos_drm_get_dma_device(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) { + struct exynos_drm_driver_info *info = &exynos_drm_drivers[i]; + struct device *dev; + + if (!info->driver || !(info->flags & DRM_DMA_DEVICE)) + continue; + + while ((dev = bus_find_device(&platform_bus_type, NULL, + &info->driver->driver, + (void *)platform_bus_type.match))) { + put_device(dev); + return dev; + } + } + return NULL; +} static void exynos_drm_unregister_devices(void) { - int i = PDEV_COUNT; + int i; + + for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) { + struct exynos_drm_driver_info *info = &exynos_drm_drivers[i]; + struct device *dev; + + if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE)) + continue; - while (--i >= 0) { - platform_device_unregister(exynos_drm_pdevs[i]); - exynos_drm_pdevs[i] = NULL; + while ((dev = bus_find_device(&platform_bus_type, NULL, + &info->driver->driver, + (void *)platform_bus_type.match))) { + put_device(dev); + platform_device_unregister(to_platform_device(dev)); + } } } static int exynos_drm_register_devices(void) { + struct platform_device *pdev; int i; - for (i = 0; i < PDEV_COUNT; ++i) { - struct platform_driver *d = exynos_drm_drv_with_simple_dev[i]; - struct platform_device *pdev = - platform_device_register_simple(d->driver.name, -1, - NULL, 0); + for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) { + struct exynos_drm_driver_info *info = &exynos_drm_drivers[i]; - if (!IS_ERR(pdev)) { - exynos_drm_pdevs[i] = pdev; + if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE)) continue; - } - while (--i >= 0) { - platform_device_unregister(exynos_drm_pdevs[i]); - exynos_drm_pdevs[i] = NULL; - } - return PTR_ERR(pdev); + pdev = platform_device_register_simple( + info->driver->driver.name, -1, NULL, 0); + if (IS_ERR(pdev)) + goto fail; } return 0; +fail: + exynos_drm_unregister_devices(); + return PTR_ERR(pdev); } -static void exynos_drm_unregister_drivers(struct platform_driver * const *drv, - int count) +static void exynos_drm_unregister_drivers(void) { - while (--count >= 0) - platform_driver_unregister(drv[count]); -} + int i; -static int exynos_drm_register_drivers(struct platform_driver * const *drv, - int count) -{ - int i, ret; + for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) { + struct exynos_drm_driver_info *info = &exynos_drm_drivers[i]; - for (i = 0; i < count; ++i) { - ret = platform_driver_register(drv[i]); - if (!ret) + if (!info->driver) continue; - while (--i >= 0) - platform_driver_unregister(drv[i]); - - return ret; + platform_driver_unregister(info->driver); } - - return 0; } -static inline int exynos_drm_register_kms_drivers(void) +static int exynos_drm_register_drivers(void) { - return 
exynos_drm_register_drivers(exynos_drm_kms_drivers, - ARRAY_SIZE(exynos_drm_kms_drivers)); -} + int i, ret; -static inline int exynos_drm_register_non_kms_drivers(void) -{ - return exynos_drm_register_drivers(exynos_drm_non_kms_drivers, - ARRAY_SIZE(exynos_drm_non_kms_drivers)); -} + for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) { + struct exynos_drm_driver_info *info = &exynos_drm_drivers[i]; -static inline void exynos_drm_unregister_kms_drivers(void) -{ - exynos_drm_unregister_drivers(exynos_drm_kms_drivers, - ARRAY_SIZE(exynos_drm_kms_drivers)); -} + if (!info->driver) + continue; -static inline void exynos_drm_unregister_non_kms_drivers(void) -{ - exynos_drm_unregister_drivers(exynos_drm_non_kms_drivers, - ARRAY_SIZE(exynos_drm_non_kms_drivers)); + ret = platform_driver_register(info->driver); + if (ret) + goto fail; + } + return 0; +fail: + exynos_drm_unregister_drivers(); + return ret; } static int exynos_drm_init(void) @@ -725,19 +734,12 @@ static int exynos_drm_init(void) if (ret) return ret; - ret = exynos_drm_register_kms_drivers(); + ret = exynos_drm_register_drivers(); if (ret) goto err_unregister_pdevs; - ret = exynos_drm_register_non_kms_drivers(); - if (ret) - goto err_unregister_kms_drivers; - return 0; -err_unregister_kms_drivers: - exynos_drm_unregister_kms_drivers(); - err_unregister_pdevs: exynos_drm_unregister_devices(); @@ -746,8 +748,7 @@ err_unregister_pdevs: static void exynos_drm_exit(void) { - exynos_drm_unregister_non_kms_drivers(); - exynos_drm_unregister_kms_drivers(); + exynos_drm_unregister_drivers(); exynos_drm_unregister_devices(); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index 17b5ded72ff1..502f750bad2a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -219,8 +219,10 @@ struct exynos_drm_private { struct drm_crtc *crtc[MAX_CRTC]; struct drm_property *plane_zpos_property; + struct device *dma_dev; unsigned long da_start; unsigned long da_space_size; + void *mapping; unsigned int pipe; @@ -230,6 +232,13 @@ struct exynos_drm_private { wait_queue_head_t wait; }; +static inline struct device *to_dma_dev(struct drm_device *dev) +{ + struct exynos_drm_private *priv = dev->dev_private; + + return priv->dma_dev; +} + /* * Exynos drm sub driver structure. * @@ -297,7 +306,6 @@ extern struct platform_driver dp_driver; extern struct platform_driver dsi_driver; extern struct platform_driver mixer_driver; extern struct platform_driver hdmi_driver; -extern struct platform_driver exynos_drm_common_hdmi_driver; extern struct platform_driver vidi_driver; extern struct platform_driver g2d_driver; extern struct platform_driver fimc_driver; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index e977a81af2e6..63c84a106c0b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -10,6 +10,8 @@ * published by the Free Software Foundation. 
*/ +#include <asm/unaligned.h> + #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_mipi_dsi.h> @@ -209,12 +211,6 @@ #define OLD_SCLK_MIPI_CLK_NAME "pll_clk" -#define REG_ADDR(dsi, reg_idx) ((dsi)->reg_base + \ - dsi->driver_data->reg_ofs[(reg_idx)]) -#define DSI_WRITE(dsi, reg_idx, val) writel((val), \ - REG_ADDR((dsi), (reg_idx))) -#define DSI_READ(dsi, reg_idx) readl(REG_ADDR((dsi), (reg_idx))) - static char *clk_names[5] = { "bus_clk", "sclk_mipi", "phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0", "sclk_rgb_vclk_to_dsim0" }; @@ -228,12 +224,8 @@ struct exynos_dsi_transfer { struct list_head list; struct completion completed; int result; - u8 data_id; - u8 data[2]; + struct mipi_dsi_packet packet; u16 flags; - - const u8 *tx_payload; - u16 tx_len; u16 tx_done; u8 *rx_payload; @@ -247,7 +239,7 @@ struct exynos_dsi_transfer { #define DSIM_STATE_VIDOUT_AVAILABLE BIT(3) struct exynos_dsi_driver_data { - unsigned int *reg_ofs; + const unsigned int *reg_ofs; unsigned int plltmr_reg; unsigned int has_freqband:1; unsigned int has_clklane_stop:1; @@ -255,7 +247,7 @@ struct exynos_dsi_driver_data { unsigned int max_freq; unsigned int wait_for_reset; unsigned int num_bits_resol; - unsigned int *reg_values; + const unsigned int *reg_values; }; struct exynos_dsi { @@ -324,7 +316,20 @@ enum reg_idx { DSIM_PHYTIMING2_REG, NUM_REGS }; -static unsigned int exynos_reg_ofs[] = { + +static inline void exynos_dsi_write(struct exynos_dsi *dsi, enum reg_idx idx, + u32 val) +{ + + writel(val, dsi->reg_base + dsi->driver_data->reg_ofs[idx]); +} + +static inline u32 exynos_dsi_read(struct exynos_dsi *dsi, enum reg_idx idx) +{ + return readl(dsi->reg_base + dsi->driver_data->reg_ofs[idx]); +} + +static const unsigned int exynos_reg_ofs[] = { [DSIM_STATUS_REG] = 0x00, [DSIM_SWRST_REG] = 0x04, [DSIM_CLKCTRL_REG] = 0x08, @@ -348,7 +353,7 @@ static unsigned int exynos_reg_ofs[] = { [DSIM_PHYTIMING2_REG] = 0x6c, }; -static unsigned int exynos5433_reg_ofs[] = { +static const unsigned int exynos5433_reg_ofs[] = { [DSIM_STATUS_REG] = 0x04, [DSIM_SWRST_REG] = 0x0C, [DSIM_CLKCTRL_REG] = 0x10, @@ -390,7 +395,7 @@ enum reg_value_idx { PHYTIMING_HS_TRAIL }; -static unsigned int reg_values[] = { +static const unsigned int reg_values[] = { [RESET_TYPE] = DSIM_SWRST, [PLL_TIMER] = 500, [STOP_STATE_CNT] = 0xf, @@ -408,7 +413,25 @@ static unsigned int reg_values[] = { [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b), }; -static unsigned int exynos5433_reg_values[] = { +static const unsigned int exynos5422_reg_values[] = { + [RESET_TYPE] = DSIM_SWRST, + [PLL_TIMER] = 500, + [STOP_STATE_CNT] = 0xf, + [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf), + [PHYCTRL_VREG_LP] = 0, + [PHYCTRL_SLEW_UP] = 0, + [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x08), + [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0d), + [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09), + [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x30), + [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e), + [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x0a), + [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0c), + [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x11), + [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0d), +}; + +static const unsigned int exynos5433_reg_values[] = { [RESET_TYPE] = DSIM_FUNCRST, [PLL_TIMER] = 22200, [STOP_STATE_CNT] = 0xa, @@ -426,7 +449,7 @@ static unsigned int exynos5433_reg_values[] = { [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c), }; -static struct exynos_dsi_driver_data 
exynos3_dsi_driver_data = { +static const struct exynos_dsi_driver_data exynos3_dsi_driver_data = { .reg_ofs = exynos_reg_ofs, .plltmr_reg = 0x50, .has_freqband = 1, @@ -438,7 +461,7 @@ static struct exynos_dsi_driver_data exynos3_dsi_driver_data = { .reg_values = reg_values, }; -static struct exynos_dsi_driver_data exynos4_dsi_driver_data = { +static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = { .reg_ofs = exynos_reg_ofs, .plltmr_reg = 0x50, .has_freqband = 1, @@ -450,7 +473,7 @@ static struct exynos_dsi_driver_data exynos4_dsi_driver_data = { .reg_values = reg_values, }; -static struct exynos_dsi_driver_data exynos4415_dsi_driver_data = { +static const struct exynos_dsi_driver_data exynos4415_dsi_driver_data = { .reg_ofs = exynos_reg_ofs, .plltmr_reg = 0x58, .has_clklane_stop = 1, @@ -461,7 +484,7 @@ static struct exynos_dsi_driver_data exynos4415_dsi_driver_data = { .reg_values = reg_values, }; -static struct exynos_dsi_driver_data exynos5_dsi_driver_data = { +static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = { .reg_ofs = exynos_reg_ofs, .plltmr_reg = 0x58, .num_clks = 2, @@ -471,7 +494,7 @@ static struct exynos_dsi_driver_data exynos5_dsi_driver_data = { .reg_values = reg_values, }; -static struct exynos_dsi_driver_data exynos5433_dsi_driver_data = { +static const struct exynos_dsi_driver_data exynos5433_dsi_driver_data = { .reg_ofs = exynos5433_reg_ofs, .plltmr_reg = 0xa0, .has_clklane_stop = 1, @@ -482,7 +505,18 @@ static struct exynos_dsi_driver_data exynos5433_dsi_driver_data = { .reg_values = exynos5433_reg_values, }; -static struct of_device_id exynos_dsi_of_match[] = { +static const struct exynos_dsi_driver_data exynos5422_dsi_driver_data = { + .reg_ofs = exynos5433_reg_ofs, + .plltmr_reg = 0xa0, + .has_clklane_stop = 1, + .num_clks = 2, + .max_freq = 1500, + .wait_for_reset = 1, + .num_bits_resol = 12, + .reg_values = exynos5422_reg_values, +}; + +static const struct of_device_id exynos_dsi_of_match[] = { { .compatible = "samsung,exynos3250-mipi-dsi", .data = &exynos3_dsi_driver_data }, { .compatible = "samsung,exynos4210-mipi-dsi", @@ -491,6 +525,8 @@ static struct of_device_id exynos_dsi_of_match[] = { .data = &exynos4415_dsi_driver_data }, { .compatible = "samsung,exynos5410-mipi-dsi", .data = &exynos5_dsi_driver_data }, + { .compatible = "samsung,exynos5422-mipi-dsi", + .data = &exynos5422_dsi_driver_data }, { .compatible = "samsung,exynos5433-mipi-dsi", .data = &exynos5433_dsi_driver_data }, { } @@ -515,10 +551,10 @@ static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi) static void exynos_dsi_reset(struct exynos_dsi *dsi) { - struct exynos_dsi_driver_data *driver_data = dsi->driver_data; + u32 reset_val = dsi->driver_data->reg_values[RESET_TYPE]; reinit_completion(&dsi->completed); - DSI_WRITE(dsi, DSIM_SWRST_REG, driver_data->reg_values[RESET_TYPE]); + exynos_dsi_write(dsi, DSIM_SWRST_REG, reset_val); } #ifndef MHZ @@ -621,7 +657,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi, reg |= DSIM_FREQ_BAND(band); } - DSI_WRITE(dsi, DSIM_PLLCTRL_REG, reg); + exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg); timeout = 1000; do { @@ -629,7 +665,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi, dev_err(dsi->dev, "PLL failed to stabilize\n"); return 0; } - reg = DSI_READ(dsi, DSIM_STATUS_REG); + reg = exynos_dsi_read(dsi, DSIM_STATUS_REG); } while ((reg & DSIM_PLL_STABLE) == 0); return fout; @@ -659,7 +695,7 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi) dev_dbg(dsi->dev, "hs_clk = %lu, 
byte_clk = %lu, esc_clk = %lu\n", hs_clk, byte_clk, esc_clk); - reg = DSI_READ(dsi, DSIM_CLKCTRL_REG); + reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG); reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS | DSIM_BYTE_CLK_SRC_MASK); @@ -669,7 +705,7 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi) | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1) | DSIM_BYTE_CLK_SRC(0) | DSIM_TX_REQUEST_HSCLK; - DSI_WRITE(dsi, DSIM_CLKCTRL_REG, reg); + exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg); return 0; } @@ -677,7 +713,7 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi) static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi) { struct exynos_dsi_driver_data *driver_data = dsi->driver_data; - unsigned int *reg_values = driver_data->reg_values; + const unsigned int *reg_values = driver_data->reg_values; u32 reg; if (driver_data->has_freqband) @@ -686,7 +722,7 @@ static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi) /* B D-PHY: D-PHY Master & Slave Analog Block control */ reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] | reg_values[PHYCTRL_SLEW_UP]; - DSI_WRITE(dsi, DSIM_PHYCTRL_REG, reg); + exynos_dsi_write(dsi, DSIM_PHYCTRL_REG, reg); /* * T LPX: Transmitted length of any Low-Power state period @@ -694,7 +730,7 @@ static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi) * burst */ reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT]; - DSI_WRITE(dsi, DSIM_PHYTIMING_REG, reg); + exynos_dsi_write(dsi, DSIM_PHYTIMING_REG, reg); /* * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00 @@ -714,7 +750,7 @@ static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi) reg_values[PHYTIMING_CLK_POST] | reg_values[PHYTIMING_CLK_TRAIL]; - DSI_WRITE(dsi, DSIM_PHYTIMING1_REG, reg); + exynos_dsi_write(dsi, DSIM_PHYTIMING1_REG, reg); /* * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00 @@ -727,29 +763,29 @@ static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi) */ reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] | reg_values[PHYTIMING_HS_TRAIL]; - DSI_WRITE(dsi, DSIM_PHYTIMING2_REG, reg); + exynos_dsi_write(dsi, DSIM_PHYTIMING2_REG, reg); } static void exynos_dsi_disable_clock(struct exynos_dsi *dsi) { u32 reg; - reg = DSI_READ(dsi, DSIM_CLKCTRL_REG); + reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG); reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN); - DSI_WRITE(dsi, DSIM_CLKCTRL_REG, reg); + exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg); - reg = DSI_READ(dsi, DSIM_PLLCTRL_REG); + reg = exynos_dsi_read(dsi, DSIM_PLLCTRL_REG); reg &= ~DSIM_PLL_EN; - DSI_WRITE(dsi, DSIM_PLLCTRL_REG, reg); + exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg); } static void exynos_dsi_enable_lane(struct exynos_dsi *dsi, u32 lane) { - u32 reg = DSI_READ(dsi, DSIM_CONFIG_REG); + u32 reg = exynos_dsi_read(dsi, DSIM_CONFIG_REG); reg |= (DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1) | DSIM_LANE_EN_CLK | DSIM_LANE_EN(lane)); - DSI_WRITE(dsi, DSIM_CONFIG_REG, reg); + exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg); } static int exynos_dsi_init_link(struct exynos_dsi *dsi) @@ -760,14 +796,14 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi) u32 lanes_mask; /* Initialize FIFO pointers */ - reg = DSI_READ(dsi, DSIM_FIFOCTRL_REG); + reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG); reg &= ~0x1f; - DSI_WRITE(dsi, DSIM_FIFOCTRL_REG, reg); + exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg); usleep_range(9000, 11000); reg |= 0x1f; - 
DSI_WRITE(dsi, DSIM_FIFOCTRL_REG, reg); + exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg); usleep_range(9000, 11000); /* DSI configuration */ @@ -836,7 +872,7 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi) dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) { reg |= DSIM_CLKLANE_STOP; } - DSI_WRITE(dsi, DSIM_CONFIG_REG, reg); + exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg); lanes_mask = BIT(dsi->lanes) - 1; exynos_dsi_enable_lane(dsi, lanes_mask); @@ -849,19 +885,19 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi) return -EFAULT; } - reg = DSI_READ(dsi, DSIM_STATUS_REG); + reg = exynos_dsi_read(dsi, DSIM_STATUS_REG); if ((reg & DSIM_STOP_STATE_DAT(lanes_mask)) != DSIM_STOP_STATE_DAT(lanes_mask)) continue; } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK))); - reg = DSI_READ(dsi, DSIM_ESCMODE_REG); + reg = exynos_dsi_read(dsi, DSIM_ESCMODE_REG); reg &= ~DSIM_STOP_STATE_CNT_MASK; reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]); - DSI_WRITE(dsi, DSIM_ESCMODE_REG, reg); + exynos_dsi_write(dsi, DSIM_ESCMODE_REG, reg); reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff); - DSI_WRITE(dsi, DSIM_TIMEOUT_REG, reg); + exynos_dsi_write(dsi, DSIM_TIMEOUT_REG, reg); return 0; } @@ -876,20 +912,20 @@ static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi) reg = DSIM_CMD_ALLOW(0xf) | DSIM_STABLE_VFP(vm->vfront_porch) | DSIM_MAIN_VBP(vm->vback_porch); - DSI_WRITE(dsi, DSIM_MVPORCH_REG, reg); + exynos_dsi_write(dsi, DSIM_MVPORCH_REG, reg); reg = DSIM_MAIN_HFP(vm->hfront_porch) | DSIM_MAIN_HBP(vm->hback_porch); - DSI_WRITE(dsi, DSIM_MHPORCH_REG, reg); + exynos_dsi_write(dsi, DSIM_MHPORCH_REG, reg); reg = DSIM_MAIN_VSA(vm->vsync_len) | DSIM_MAIN_HSA(vm->hsync_len); - DSI_WRITE(dsi, DSIM_MSYNC_REG, reg); + exynos_dsi_write(dsi, DSIM_MSYNC_REG, reg); } reg = DSIM_MAIN_HRESOL(vm->hactive, num_bits_resol) | DSIM_MAIN_VRESOL(vm->vactive, num_bits_resol); - DSI_WRITE(dsi, DSIM_MDRESOL_REG, reg); + exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg); dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive); } @@ -898,12 +934,12 @@ static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable) { u32 reg; - reg = DSI_READ(dsi, DSIM_MDRESOL_REG); + reg = exynos_dsi_read(dsi, DSIM_MDRESOL_REG); if (enable) reg |= DSIM_MAIN_STAND_BY; else reg &= ~DSIM_MAIN_STAND_BY; - DSI_WRITE(dsi, DSIM_MDRESOL_REG, reg); + exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg); } static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi) @@ -911,7 +947,7 @@ static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi) int timeout = 2000; do { - u32 reg = DSI_READ(dsi, DSIM_FIFOCTRL_REG); + u32 reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG); if (!(reg & DSIM_SFR_HEADER_FULL)) return 0; @@ -925,34 +961,35 @@ static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi) static void exynos_dsi_set_cmd_lpm(struct exynos_dsi *dsi, bool lpm) { - u32 v = DSI_READ(dsi, DSIM_ESCMODE_REG); + u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG); if (lpm) v |= DSIM_CMD_LPDT_LP; else v &= ~DSIM_CMD_LPDT_LP; - DSI_WRITE(dsi, DSIM_ESCMODE_REG, v); + exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v); } static void exynos_dsi_force_bta(struct exynos_dsi *dsi) { - u32 v = DSI_READ(dsi, DSIM_ESCMODE_REG); + u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG); v |= DSIM_FORCE_BTA; - DSI_WRITE(dsi, DSIM_ESCMODE_REG, v); + exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v); } static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi, struct exynos_dsi_transfer *xfer) { struct device *dev = dsi->dev; - const 
u8 *payload = xfer->tx_payload + xfer->tx_done; - u16 length = xfer->tx_len - xfer->tx_done; + struct mipi_dsi_packet *pkt = &xfer->packet; + const u8 *payload = pkt->payload + xfer->tx_done; + u16 length = pkt->payload_length - xfer->tx_done; bool first = !xfer->tx_done; u32 reg; dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n", - xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done); + xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done); if (length > DSI_TX_FIFO_SIZE) length = DSI_TX_FIFO_SIZE; @@ -961,9 +998,8 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi, /* Send payload */ while (length >= 4) { - reg = (payload[3] << 24) | (payload[2] << 16) - | (payload[1] << 8) | payload[0]; - DSI_WRITE(dsi, DSIM_PAYLOAD_REG, reg); + reg = get_unaligned_le32(payload); + exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg); payload += 4; length -= 4; } @@ -978,10 +1014,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi, /* Fall through */ case 1: reg |= payload[0]; - DSI_WRITE(dsi, DSIM_PAYLOAD_REG, reg); - break; - case 0: - /* Do nothing */ + exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg); break; } @@ -989,7 +1022,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi, if (!first) return; - reg = (xfer->data[1] << 16) | (xfer->data[0] << 8) | xfer->data_id; + reg = get_unaligned_le32(pkt->header); if (exynos_dsi_wait_for_hdr_fifo(dsi)) { dev_err(dev, "waiting for header FIFO timed out\n"); return; @@ -1001,7 +1034,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi, dsi->state ^= DSIM_STATE_CMD_LPM; } - DSI_WRITE(dsi, DSIM_PKTHDR_REG, reg); + exynos_dsi_write(dsi, DSIM_PKTHDR_REG, reg); if (xfer->flags & MIPI_DSI_MSG_REQ_ACK) exynos_dsi_force_bta(dsi); @@ -1017,7 +1050,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi, u32 reg; if (first) { - reg = DSI_READ(dsi, DSIM_RXFIFO_REG); + reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG); switch (reg & 0x3f) { case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: @@ -1056,7 +1089,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi, /* Receive payload */ while (length >= 4) { - reg = DSI_READ(dsi, DSIM_RXFIFO_REG); + reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG); payload[0] = (reg >> 0) & 0xff; payload[1] = (reg >> 8) & 0xff; payload[2] = (reg >> 16) & 0xff; @@ -1066,7 +1099,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi, } if (length) { - reg = DSI_READ(dsi, DSIM_RXFIFO_REG); + reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG); switch (length) { case 3: payload[2] = (reg >> 16) & 0xff; @@ -1085,7 +1118,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi, clear_fifo: length = DSI_RX_FIFO_SIZE / 4; do { - reg = DSI_READ(dsi, DSIM_RXFIFO_REG); + reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG); if (reg == DSI_RX_FIFO_EMPTY) break; } while (--length); @@ -1110,13 +1143,14 @@ again: spin_unlock_irqrestore(&dsi->transfer_lock, flags); - if (xfer->tx_len && xfer->tx_done == xfer->tx_len) + if (xfer->packet.payload_length && + xfer->tx_done == xfer->packet.payload_length) /* waiting for RX */ return; exynos_dsi_send_to_fifo(dsi, xfer); - if (xfer->tx_len || xfer->rx_len) + if (xfer->packet.payload_length || xfer->rx_len) return; xfer->result = 0; @@ -1152,10 +1186,11 @@ static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi) spin_unlock_irqrestore(&dsi->transfer_lock, flags); dev_dbg(dsi->dev, - "> xfer %p, tx_len %u, tx_done %u, rx_len %u, rx_done %u\n", - xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done); + "> xfer %p, tx_len 
%zu, tx_done %u, rx_len %u, rx_done %u\n", + xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len, + xfer->rx_done); - if (xfer->tx_done != xfer->tx_len) + if (xfer->tx_done != xfer->packet.payload_length) return true; if (xfer->rx_done != xfer->rx_len) @@ -1226,9 +1261,10 @@ static int exynos_dsi_transfer(struct exynos_dsi *dsi, wait_for_completion_timeout(&xfer->completed, msecs_to_jiffies(DSI_XFER_TIMEOUT_MS)); if (xfer->result == -ETIMEDOUT) { + struct mipi_dsi_packet *pkt = &xfer->packet; exynos_dsi_remove_transfer(dsi, xfer); - dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 2, xfer->data, - xfer->tx_len, xfer->tx_payload); + dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 4, pkt->header, + (int)pkt->payload_length, pkt->payload); return -ETIMEDOUT; } @@ -1241,20 +1277,20 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id) struct exynos_dsi *dsi = dev_id; u32 status; - status = DSI_READ(dsi, DSIM_INTSRC_REG); + status = exynos_dsi_read(dsi, DSIM_INTSRC_REG); if (!status) { static unsigned long int j; if (printk_timed_ratelimit(&j, 500)) dev_warn(dsi->dev, "spurious interrupt\n"); return IRQ_HANDLED; } - DSI_WRITE(dsi, DSIM_INTSRC_REG, status); + exynos_dsi_write(dsi, DSIM_INTSRC_REG, status); if (status & DSIM_INT_SW_RST_RELEASE) { u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY | DSIM_INT_SFR_HDR_FIFO_EMPTY | DSIM_INT_FRAME_DONE | DSIM_INT_RX_ECC_ERR | DSIM_INT_SW_RST_RELEASE); - DSI_WRITE(dsi, DSIM_INTMSK_REG, mask); + exynos_dsi_write(dsi, DSIM_INTMSK_REG, mask); complete(&dsi->completed); return IRQ_HANDLED; } @@ -1401,12 +1437,6 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host, return 0; } -/* distinguish between short and long DSI packet types */ -static bool exynos_dsi_is_short_dsi_type(u8 type) -{ - return (type & 0x0f) <= 8; -} - static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { @@ -1424,25 +1454,9 @@ static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host, dsi->state |= DSIM_STATE_INITIALIZED; } - if (msg->tx_len == 0) - return -EINVAL; - - xfer.data_id = msg->type | (msg->channel << 6); - - if (exynos_dsi_is_short_dsi_type(msg->type)) { - const char *tx_buf = msg->tx_buf; - - if (msg->tx_len > 2) - return -EINVAL; - xfer.tx_len = 0; - xfer.data[0] = tx_buf[0]; - xfer.data[1] = (msg->tx_len == 2) ? 
tx_buf[1] : 0; - } else { - xfer.tx_len = msg->tx_len; - xfer.data[0] = msg->tx_len & 0xff; - xfer.data[1] = msg->tx_len >> 8; - xfer.tx_payload = msg->tx_buf; - } + ret = mipi_dsi_create_packet(&xfer.packet, msg); + if (ret < 0) + return ret; xfer.rx_len = msg->rx_len; xfer.rx_payload = msg->rx_buf; @@ -1597,13 +1611,6 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder) return 0; } -static bool exynos_dsi_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void exynos_dsi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -1623,7 +1630,6 @@ static void exynos_dsi_mode_set(struct drm_encoder *encoder, } static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = { - .mode_fixup = exynos_dsi_mode_fixup, .mode_set = exynos_dsi_mode_set, .enable = exynos_dsi_enable, .disable = exynos_dsi_disable, @@ -1782,6 +1788,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master, bridge = of_drm_find_bridge(dsi->bridge_node); if (bridge) { + encoder->bridge = bridge; drm_bridge_attach(drm_dev, bridge); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index f6118baa8e3e..4ae860c44f1d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -50,7 +50,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info, if (vm_size > exynos_gem->size) return -EINVAL; - ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->pages, + ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie, exynos_gem->dma_addr, exynos_gem->size, &exynos_gem->dma_attrs); if (ret < 0) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index c747824f3c98..0525c56145db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -163,7 +163,6 @@ struct fimc_context { u32 clk_frequency; struct regmap *sysreg; struct fimc_scaler sc; - struct exynos_drm_ipp_pol pol; int id; int irq; bool suspended; @@ -260,32 +259,6 @@ static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb) fimc_write(ctx, cfg, EXYNOS_CIGCTRL); } -static void fimc_set_polarity(struct fimc_context *ctx, - struct exynos_drm_ipp_pol *pol) -{ - u32 cfg; - - DRM_DEBUG_KMS("inv_pclk[%d]inv_vsync[%d]\n", - pol->inv_pclk, pol->inv_vsync); - DRM_DEBUG_KMS("inv_href[%d]inv_hsync[%d]\n", - pol->inv_href, pol->inv_hsync); - - cfg = fimc_read(ctx, EXYNOS_CIGCTRL); - cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC | - EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC); - - if (pol->inv_pclk) - cfg |= EXYNOS_CIGCTRL_INVPOLPCLK; - if (pol->inv_vsync) - cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC; - if (pol->inv_href) - cfg |= EXYNOS_CIGCTRL_INVPOLHREF; - if (pol->inv_hsync) - cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC; - - fimc_write(ctx, cfg, EXYNOS_CIGCTRL); -} - static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable) { u32 cfg; @@ -1467,7 +1440,6 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) /* If set ture, we can save jpeg about screen */ fimc_handle_jpeg(ctx, false); fimc_set_scaler(ctx, &ctx->sc); - fimc_set_polarity(ctx, &ctx->pol); switch (cmd) { case IPP_CMD_M2M: @@ -1723,7 +1695,7 @@ static int fimc_probe(struct platform_device *pdev) goto err_put_clk; } - DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, 
(int)ippdrv); + DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv); spin_lock_init(&ctx->lock); platform_set_drvdata(pdev, ctx); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 70194d0e4fe4..51d484ae9f49 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -94,12 +94,14 @@ struct fimd_driver_data { unsigned int lcdblk_offset; unsigned int lcdblk_vt_shift; unsigned int lcdblk_bypass_shift; + unsigned int lcdblk_mic_bypass_shift; unsigned int has_shadowcon:1; unsigned int has_clksel:1; unsigned int has_limited_fmt:1; unsigned int has_vidoutcon:1; unsigned int has_vtsel:1; + unsigned int has_mic_bypass:1; }; static struct fimd_driver_data s3c64xx_fimd_driver_data = { @@ -145,6 +147,18 @@ static struct fimd_driver_data exynos5_fimd_driver_data = { .has_vtsel = 1, }; +static struct fimd_driver_data exynos5420_fimd_driver_data = { + .timing_base = 0x20000, + .lcdblk_offset = 0x214, + .lcdblk_vt_shift = 24, + .lcdblk_bypass_shift = 15, + .lcdblk_mic_bypass_shift = 11, + .has_shadowcon = 1, + .has_vidoutcon = 1, + .has_vtsel = 1, + .has_mic_bypass = 1, +}; + struct fimd_context { struct device *dev; struct drm_device *drm_dev; @@ -168,7 +182,6 @@ struct fimd_context { atomic_t win_updated; atomic_t triggering; - struct exynos_drm_panel_info panel; struct fimd_driver_data *driver_data; struct drm_encoder *encoder; }; @@ -184,6 +197,8 @@ static const struct of_device_id fimd_driver_dt_match[] = { .data = &exynos4415_fimd_driver_data }, { .compatible = "samsung,exynos5250-fimd", .data = &exynos5_fimd_driver_data }, + { .compatible = "samsung,exynos5420-fimd", + .data = &exynos5420_fimd_driver_data }, {}, }; MODULE_DEVICE_TABLE(of, fimd_driver_dt_match); @@ -380,7 +395,7 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx, } /* Find the clock divider value that gets us closest to ideal_clk */ - clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk); + clkdiv = DIV_ROUND_CLOSEST(clk_get_rate(ctx->lcd_clk), ideal_clk); return (clkdiv < 0x100) ? clkdiv : 0xff; } @@ -461,6 +476,18 @@ static void fimd_commit(struct exynos_drm_crtc *crtc) return; } + /* TODO: When MIC is enabled for display path, the lcdblk_mic_bypass + * bit should be cleared. + */ + if (driver_data->has_mic_bypass && ctx->sysreg && + regmap_update_bits(ctx->sysreg, + driver_data->lcdblk_offset, + 0x1 << driver_data->lcdblk_mic_bypass_shift, + 0x1 << driver_data->lcdblk_mic_bypass_shift)) { + DRM_ERROR("Failed to update sysreg for bypass mic.\n"); + return; + } + /* setup horizontal and vertical display size. */ val = VIDTCON2_LINEVAL(mode->vdisplay - 1) | VIDTCON2_HOZVAL(mode->hdisplay - 1) | @@ -861,7 +888,8 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) * clock. On these SoCs the bootloader may enable it but any * power domain off/on will reset it to disable state. */ - if (ctx->driver_data != &exynos5_fimd_driver_data) + if (ctx->driver_data != &exynos5_fimd_driver_data || + ctx->driver_data != &exynos5420_fimd_driver_data) return; val = enable ? 
DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index c17efdb238a6..193d3602dffb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -259,7 +259,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) init_dma_attrs(&g2d->cmdlist_dma_attrs); dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); - g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev, + g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE, &g2d->cmdlist_pool, GFP_KERNEL, &g2d->cmdlist_dma_attrs); @@ -293,7 +293,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) return 0; err: - dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE, + dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); return ret; @@ -306,7 +306,8 @@ static void g2d_fini_cmdlist(struct g2d_data *g2d) kfree(g2d->cmdlist_node); if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) { - dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE, + dma_free_attrs(to_dma_dev(subdrv->drm_dev), + G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); } @@ -880,7 +881,6 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node; struct drm_exynos_pending_g2d_event *e; struct timeval now; - unsigned long flags; if (list_empty(&runqueue_node->event_list)) return; @@ -893,10 +893,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) e->event.tv_usec = now.tv_usec; e->event.cmdlist_no = cmdlist_no; - spin_lock_irqsave(&drm_dev->event_lock, flags); - list_move_tail(&e->base.link, &e->base.file_priv->event_list); - wake_up_interruptible(&e->base.file_priv->event_wait); - spin_unlock_irqrestore(&drm_dev->event_lock, flags); + drm_send_event(drm_dev, &e->base); } static irqreturn_t g2d_irq_handler(int irq, void *dev_id) @@ -1072,7 +1069,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, struct drm_exynos_pending_g2d_event *e; struct g2d_cmdlist_node *node; struct g2d_cmdlist *cmdlist; - unsigned long flags; int size; int ret; @@ -1094,21 +1090,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, node->event = NULL; if (req->event_type != G2D_EVENT_NOT) { - spin_lock_irqsave(&drm_dev->event_lock, flags); - if (file->event_space < sizeof(e->event)) { - spin_unlock_irqrestore(&drm_dev->event_lock, flags); - ret = -ENOMEM; - goto err; - } - file->event_space -= sizeof(e->event); - spin_unlock_irqrestore(&drm_dev->event_lock, flags); - e = kzalloc(sizeof(*node->event), GFP_KERNEL); if (!e) { - spin_lock_irqsave(&drm_dev->event_lock, flags); - file->event_space += sizeof(e->event); - spin_unlock_irqrestore(&drm_dev->event_lock, flags); - ret = -ENOMEM; goto err; } @@ -1116,9 +1099,12 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, e->event.base.type = DRM_EXYNOS_G2D_EVENT; e->event.base.length = sizeof(e->event); e->event.user_data = req->user_data; - e->base.event = &e->event.base; - e->base.file_priv = file; - e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; + + ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base); + if (ret) { + kfree(e); + goto err; + } node->event = e; } @@ -1166,7 +1152,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void 
*data, goto err_free_event; } - cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd; + cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd; if (copy_from_user(cmdlist->data + cmdlist->last, (void __user *)cmd, @@ -1184,7 +1170,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, if (req->cmd_buf_nr) { struct drm_exynos_g2d_cmd *cmd_buf; - cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf; + cmd_buf = (struct drm_exynos_g2d_cmd *) + (unsigned long)req->cmd_buf; if (copy_from_user(cmdlist->data + cmdlist->last, (void __user *)cmd_buf, @@ -1219,12 +1206,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, err_unmap: g2d_unmap_cmdlist_gem(g2d, node, file); err_free_event: - if (node->event) { - spin_lock_irqsave(&drm_dev->event_lock, flags); - file->event_space += sizeof(e->event); - spin_unlock_irqrestore(&drm_dev->event_lock, flags); - kfree(node->event); - } + if (node->event) + drm_event_cancel_free(drm_dev, &node->event->base); err: g2d_put_cmdlist(g2d, node); return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 32358c5e3db4..54b639497d23 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -65,7 +65,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) return -ENOMEM; } - exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size, + exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size, &exynos_gem->dma_addr, GFP_KERNEL, &exynos_gem->dma_attrs); if (!exynos_gem->cookie) { @@ -73,7 +73,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) goto err_free; } - ret = dma_get_sgtable_attrs(dev->dev, &sgt, exynos_gem->cookie, + ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie, exynos_gem->dma_addr, exynos_gem->size, &exynos_gem->dma_attrs); if (ret < 0) { @@ -98,7 +98,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) err_sgt_free: sg_free_table(&sgt); err_dma_free: - dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie, + dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, exynos_gem->dma_addr, &exynos_gem->dma_attrs); err_free: drm_free_large(exynos_gem->pages); @@ -118,7 +118,7 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem) DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", (unsigned long)exynos_gem->dma_addr, exynos_gem->size); - dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie, + dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, (dma_addr_t)exynos_gem->dma_addr, &exynos_gem->dma_attrs); @@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev, return ERR_PTR(ret); } - DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); + DRM_DEBUG_KMS("created file object = %p\n", obj->filp); return exynos_gem; } @@ -335,7 +335,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem, if (vm_size > exynos_gem->size) return -EINVAL; - ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->pages, + ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie, exynos_gem->dma_addr, exynos_gem->size, &exynos_gem->dma_attrs); if (ret < 0) { @@ -381,7 +381,7 @@ int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, mutex_lock(&drm_dev->struct_mutex); - nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); + nents = dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, 
dir); if (!nents) { DRM_ERROR("failed to map sgl with dma.\n"); mutex_unlock(&drm_dev->struct_mutex); @@ -396,7 +396,7 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev, struct sg_table *sgt, enum dma_data_direction dir) { - dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); + dma_unmap_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir); } void exynos_drm_gem_free_object(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 7aecd23cfa11..5d20da8f957e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev) return ret; } - DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); + DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv); mutex_init(&ctx->lock); platform_set_drvdata(pdev, ctx); diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c index d73b9ad35b7a..7ca09ee19656 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c +++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c @@ -9,7 +9,7 @@ * option) any later version. */ -#include <drmP.h> +#include <drm/drmP.h> #include <drm/exynos_drm.h> #include <linux/dma-mapping.h> @@ -30,7 +30,6 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev) { struct dma_iommu_mapping *mapping = NULL; struct exynos_drm_private *priv = drm_dev->dev_private; - struct device *dev = drm_dev->dev; if (!priv->da_start) priv->da_start = EXYNOS_DEV_ADDR_START; @@ -43,18 +42,9 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev) if (IS_ERR(mapping)) return PTR_ERR(mapping); - dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), - GFP_KERNEL); - if (!dev->dma_parms) - goto error; - - dma_set_max_seg_size(dev, 0xffffffffu); - dev->archdata.mapping = mapping; + priv->mapping = mapping; return 0; -error: - arm_iommu_release_mapping(mapping); - return -ENOMEM; } /* @@ -67,9 +57,9 @@ error: */ void drm_release_iommu_mapping(struct drm_device *drm_dev) { - struct device *dev = drm_dev->dev; + struct exynos_drm_private *priv = drm_dev->dev_private; - arm_iommu_release_mapping(dev->archdata.mapping); + arm_iommu_release_mapping(priv->mapping); } /* @@ -84,10 +74,10 @@ void drm_release_iommu_mapping(struct drm_device *drm_dev) int drm_iommu_attach_device(struct drm_device *drm_dev, struct device *subdrv_dev) { - struct device *dev = drm_dev->dev; + struct exynos_drm_private *priv = drm_dev->dev_private; int ret; - if (!dev->archdata.mapping) + if (!priv->mapping) return 0; subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, @@ -101,23 +91,12 @@ int drm_iommu_attach_device(struct drm_device *drm_dev, if (subdrv_dev->archdata.mapping) arm_iommu_detach_device(subdrv_dev); - ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); + ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); if (ret < 0) { DRM_DEBUG_KMS("failed iommu attach.\n"); return ret; } - /* - * Set dma_ops to drm_device just one time. - * - * The dma mapping api needs device object and the api is used - * to allocate physial memory and map it with iommu table. - * If iommu attach succeeded, the sub driver would have dma_ops - * for iommu and also all sub drivers have same dma_ops. 
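Alongside the gem and g2d DMA calls switching to to_dma_dev(), the exynos_drm_iommu helpers above stop keeping the ARM DMA-IOMMU mapping in dev->archdata.mapping and store it in the DRM driver's private data instead, so attach/detach only ever look at dev_private. A minimal sketch of that ownership pattern, assuming an ARM build with CONFIG_ARM_DMA_USE_IOMMU; the my_drm_private structure and helper names are invented for illustration, while arm_iommu_create_mapping() and arm_iommu_attach_device() are the real ARM dma-mapping API:

#include <linux/platform_device.h>
#include <asm/dma-iommu.h>
#include <drm/drmP.h>

struct my_drm_private {
	struct dma_iommu_mapping *mapping;	/* owned by the DRM device, not archdata */
};

static int my_create_iommu_mapping(struct drm_device *drm_dev,
				   dma_addr_t start, unsigned long size)
{
	struct my_drm_private *priv = drm_dev->dev_private;
	struct dma_iommu_mapping *mapping;

	mapping = arm_iommu_create_mapping(&platform_bus_type, start, size);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	priv->mapping = mapping;
	return 0;
}

static int my_iommu_attach(struct drm_device *drm_dev, struct device *subdrv_dev)
{
	struct my_drm_private *priv = drm_dev->dev_private;

	if (!priv->mapping)
		return 0;	/* IOMMU not in use for this device */

	return arm_iommu_attach_device(subdrv_dev, priv->mapping);
}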
- */ - if (get_dma_ops(dev) == get_dma_ops(NULL)) - set_dma_ops(dev, get_dma_ops(subdrv_dev)); - return 0; } @@ -133,8 +112,8 @@ int drm_iommu_attach_device(struct drm_device *drm_dev, void drm_iommu_detach_device(struct drm_device *drm_dev, struct device *subdrv_dev) { - struct device *dev = drm_dev->dev; - struct dma_iommu_mapping *mapping = dev->archdata.mapping; + struct exynos_drm_private *priv = drm_dev->dev_private; + struct dma_iommu_mapping *mapping = priv->mapping; if (!mapping || !mapping->domain) return; diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h index dc1b5441f491..5ffebe02ee4d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h +++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h @@ -29,9 +29,9 @@ void drm_iommu_detach_device(struct drm_device *dev_dev, static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) { - struct device *dev = drm_dev->dev; + struct exynos_drm_private *priv = drm_dev->dev_private; - return dev->archdata.mapping ? true : false; + return priv->mapping ? true : false; } #else diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index 67d24236e745..9c84ee76f18a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id) * e.g PAUSE state, queue buf, command control. */ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { - DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv); + DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv); mutex_lock(&ippdrv->cmd_lock); list_for_each_entry(c_node, &ippdrv->cmd_list, list) { @@ -388,8 +388,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, } property->prop_id = ret; - DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", - property->prop_id, property->cmd, (int)ippdrv); + DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n", + property->prop_id, property->cmd, ippdrv); /* stored property information and ippdrv in private data */ c_node->property = *property; @@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev, { int i; - DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); + DRM_DEBUG_KMS("node[%p]\n", m_node); if (!m_node) { DRM_ERROR("invalid dequeue node.\n"); @@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node m_node->buf_id = qbuf->buf_id; INIT_LIST_HEAD(&m_node->list); - DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id); + DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id); DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); for_each_ipp_planar(i) { @@ -582,8 +582,8 @@ static struct drm_exynos_ipp_mem_node buf_info->handles[i] = qbuf->handle[i]; buf_info->base[i] = *addr; - DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i, - buf_info->base[i], buf_info->handles[i]); + DRM_DEBUG_KMS("i[%d]base[%pad]hd[0x%lx]\n", i, + &buf_info->base[i], buf_info->handles[i]); } } @@ -618,27 +618,18 @@ static void ipp_clean_mem_nodes(struct drm_device *drm_dev, mutex_unlock(&c_node->mem_lock); } -static void ipp_free_event(struct drm_pending_event *event) -{ - kfree(event); -} - static int ipp_get_event(struct drm_device *drm_dev, struct drm_exynos_ipp_cmd_node *c_node, struct drm_exynos_ipp_queue_buf *qbuf) { struct drm_exynos_ipp_send_event *e; - unsigned long flags; + int ret; DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id); e = kzalloc(sizeof(*e), 
GFP_KERNEL); - if (!e) { - spin_lock_irqsave(&drm_dev->event_lock, flags); - c_node->filp->event_space += sizeof(e->event); - spin_unlock_irqrestore(&drm_dev->event_lock, flags); + if (!e) return -ENOMEM; - } /* make event */ e->event.base.type = DRM_EXYNOS_IPP_EVENT; @@ -646,9 +637,13 @@ static int ipp_get_event(struct drm_device *drm_dev, e->event.user_data = qbuf->user_data; e->event.prop_id = qbuf->prop_id; e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id; - e->base.event = &e->event.base; - e->base.file_priv = c_node->filp; - e->base.destroy = ipp_free_event; + + ret = drm_event_reserve_init(drm_dev, c_node->filp, &e->base, &e->event.base); + if (ret) { + kfree(e); + return ret; + } + mutex_lock(&c_node->event_lock); list_add_tail(&e->base.link, &c_node->event_list); mutex_unlock(&c_node->event_lock); @@ -664,7 +659,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node, mutex_lock(&c_node->event_lock); list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { - DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); + DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e); /* * qbuf == NULL condition means all event deletion. @@ -755,7 +750,7 @@ static struct drm_exynos_ipp_mem_node /* find memory node from memory list */ list_for_each_entry(m_node, head, list) { - DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node); + DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node); /* compare buffer id */ if (m_node->buf_id == qbuf->buf_id) @@ -772,7 +767,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv, struct exynos_drm_ipp_ops *ops = NULL; int ret = 0; - DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); + DRM_DEBUG_KMS("node[%p]\n", m_node); if (!m_node) { DRM_ERROR("invalid queue node.\n"); @@ -1237,7 +1232,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv, m_node = list_first_entry(head, struct drm_exynos_ipp_mem_node, list); - DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node); + DRM_DEBUG_KMS("m_node[%p]\n", m_node); ret = ipp_set_mem_node(ippdrv, c_node, m_node); if (ret) { @@ -1412,7 +1407,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv, struct drm_exynos_ipp_send_event *e; struct list_head *head; struct timeval now; - unsigned long flags; u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, }; int ret, i; @@ -1525,10 +1519,7 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv, for_each_ipp_ops(i) e->event.buf_id[i] = tbuf_id[i]; - spin_lock_irqsave(&drm_dev->event_lock, flags); - list_move_tail(&e->base.link, &e->base.file_priv->event_list); - wake_up_interruptible(&e->base.file_priv->event_wait); - spin_unlock_irqrestore(&drm_dev->event_lock, flags); + drm_send_event(drm_dev, &e->base); mutex_unlock(&c_node->event_lock); DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n", @@ -1610,8 +1601,8 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev) } ippdrv->prop_list.ipp_id = ret; - DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n", - count++, (int)ippdrv, ret); + DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n", + count++, ippdrv, ret); /* store parent device for node */ ippdrv->parent_dev = dev; @@ -1668,7 +1659,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev, file_priv->ipp_dev = dev; - DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev); + DRM_DEBUG_KMS("done priv[%p]\n", dev); return 0; } @@ -1685,8 +1676,8 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, mutex_lock(&ippdrv->cmd_lock); list_for_each_entry_safe(c_node, tc_node, 
&ippdrv->cmd_list, list) { - DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", - count++, (int)ippdrv); + DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", + count++, ippdrv); if (c_node->filp == file) { /* diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index 4eaef36aec5a..9869d70e9e54 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -18,6 +18,7 @@ #include <linux/of.h> #include <linux/of_graph.h> #include <linux/clk.h> +#include <linux/component.h> #include <drm/drmP.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> @@ -306,9 +307,9 @@ exit: return ret; } -void mic_disable(struct drm_bridge *bridge) { } +static void mic_disable(struct drm_bridge *bridge) { } -void mic_post_disable(struct drm_bridge *bridge) +static void mic_post_disable(struct drm_bridge *bridge) { struct exynos_mic *mic = bridge->driver_private; int i; @@ -328,7 +329,7 @@ already_disabled: mutex_unlock(&mic_mutex); } -void mic_pre_enable(struct drm_bridge *bridge) +static void mic_pre_enable(struct drm_bridge *bridge) { struct exynos_mic *mic = bridge->driver_private; int ret, i; @@ -371,11 +372,35 @@ already_enabled: mutex_unlock(&mic_mutex); } -void mic_enable(struct drm_bridge *bridge) { } +static void mic_enable(struct drm_bridge *bridge) { } -void mic_destroy(struct drm_bridge *bridge) +static const struct drm_bridge_funcs mic_bridge_funcs = { + .disable = mic_disable, + .post_disable = mic_post_disable, + .pre_enable = mic_pre_enable, + .enable = mic_enable, +}; + +static int exynos_mic_bind(struct device *dev, struct device *master, + void *data) { - struct exynos_mic *mic = bridge->driver_private; + struct exynos_mic *mic = dev_get_drvdata(dev); + int ret; + + mic->bridge.funcs = &mic_bridge_funcs; + mic->bridge.of_node = dev->of_node; + mic->bridge.driver_private = mic; + ret = drm_bridge_add(&mic->bridge); + if (ret) + DRM_ERROR("mic: Failed to add MIC to the global bridge list\n"); + + return ret; +} + +static void exynos_mic_unbind(struct device *dev, struct device *master, + void *data) +{ + struct exynos_mic *mic = dev_get_drvdata(dev); int i; mutex_lock(&mic_mutex); @@ -387,16 +412,16 @@ void mic_destroy(struct drm_bridge *bridge) already_disabled: mutex_unlock(&mic_mutex); + + drm_bridge_remove(&mic->bridge); } -static const struct drm_bridge_funcs mic_bridge_funcs = { - .disable = mic_disable, - .post_disable = mic_post_disable, - .pre_enable = mic_pre_enable, - .enable = mic_enable, +static const struct component_ops exynos_mic_component_ops = { + .bind = exynos_mic_bind, + .unbind = exynos_mic_unbind, }; -int exynos_mic_probe(struct platform_device *pdev) +static int exynos_mic_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct exynos_mic *mic; @@ -435,17 +460,8 @@ int exynos_mic_probe(struct platform_device *pdev) goto err; } - mic->bridge.funcs = &mic_bridge_funcs; - mic->bridge.of_node = dev->of_node; - mic->bridge.driver_private = mic; - ret = drm_bridge_add(&mic->bridge); - if (ret) { - DRM_ERROR("mic: Failed to add MIC to the global bridge list\n"); - goto err; - } - for (i = 0; i < NUM_CLKS; i++) { - mic->clks[i] = of_clk_get_by_name(dev->of_node, clk_names[i]); + mic->clks[i] = devm_clk_get(dev, clk_names[i]); if (IS_ERR(mic->clks[i])) { DRM_ERROR("mic: Failed to get clock (%s)\n", clk_names[i]); @@ -454,7 +470,10 @@ int exynos_mic_probe(struct platform_device *pdev) } } + platform_set_drvdata(pdev, mic); + DRM_DEBUG_KMS("MIC has been probed\n"); + return component_add(dev, 
&exynos_mic_component_ops); err: return ret; @@ -462,14 +481,7 @@ err: static int exynos_mic_remove(struct platform_device *pdev) { - struct exynos_mic *mic = platform_get_drvdata(pdev); - int i; - - drm_bridge_remove(&mic->bridge); - - for (i = NUM_CLKS - 1; i > -1; i--) - clk_put(mic->clks[i]); - + component_del(&pdev->dev, &exynos_mic_component_ops); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index bea0f7826d30..f18fbe43f55f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -20,7 +20,6 @@ #include <drm/drmP.h> #include <drm/exynos_drm.h> #include "regs-rotator.h" -#include "exynos_drm.h" #include "exynos_drm_drv.h" #include "exynos_drm_ipp.h" @@ -754,7 +753,7 @@ static int rotator_probe(struct platform_device *pdev) goto err_ippdrv_register; } - DRM_DEBUG_KMS("ippdrv[0x%x]\n", (int)ippdrv); + DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv); platform_set_drvdata(pdev, rot); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 62ac4e5fa51d..608b0afa337f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -223,7 +223,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work) } } -static int vidi_show_connection(struct device *dev, +static ssize_t vidi_show_connection(struct device *dev, struct device_attribute *attr, char *buf) { struct vidi_context *ctx = dev_get_drvdata(dev); @@ -238,7 +238,7 @@ static int vidi_show_connection(struct device *dev, return rc; } -static int vidi_store_connection(struct device *dev, +static ssize_t vidi_store_connection(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { @@ -294,7 +294,9 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, } if (vidi->connection) { - struct edid *raw_edid = (struct edid *)(uint32_t)vidi->edid; + struct edid *raw_edid; + + raw_edid = (struct edid *)(unsigned long)vidi->edid; if (!drm_edid_is_valid(raw_edid)) { DRM_DEBUG_KMS("edid data is invalid.\n"); return -EINVAL; @@ -410,13 +412,6 @@ static int vidi_create_connector(struct drm_encoder *encoder) return 0; } -static bool exynos_vidi_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void exynos_vidi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -432,7 +427,6 @@ static void exynos_vidi_disable(struct drm_encoder *encoder) } static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = { - .mode_fixup = exynos_vidi_mode_fixup, .mode_set = exynos_vidi_mode_set, .enable = exynos_vidi_enable, .disable = exynos_vidi_disable, diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 21a29dbce18c..e148d728e28c 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -867,10 +867,8 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata, { u32 hdr_sum; u8 chksum; - u32 mod; u8 ar; - mod = hdmi_reg_read(hdata, HDMI_MODE_SEL); if (hdata->dvi_mode) { hdmi_reg_writeb(hdata, HDMI_VSI_CON, HDMI_VSI_CON_DO_NOT_TRANSMIT); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index d8ab8f0af10c..7574db2da413 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c 
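The exynos_drm_mic conversion that finishes just above moves the driver onto the component framework: probe now only allocates the context, grabs clocks with devm_clk_get() and calls component_add(), while drm_bridge_add()/drm_bridge_remove() happen in the bind/unbind callbacks once the master DRM device is assembled. A stripped-down sketch of that shape; component_add(), component_del(), struct component_ops and drm_bridge_add() are the real APIs, and everything named my_* is illustrative only:

#include <linux/component.h>
#include <linux/platform_device.h>
#include <drm/drm_crtc.h>

struct my_bridge {
	struct drm_bridge bridge;
};

static int my_bridge_bind(struct device *dev, struct device *master, void *data)
{
	struct my_bridge *mb = dev_get_drvdata(dev);

	/* A real driver would also fill in bridge.funcs, .of_node, .driver_private. */
	return drm_bridge_add(&mb->bridge);
}

static void my_bridge_unbind(struct device *dev, struct device *master, void *data)
{
	struct my_bridge *mb = dev_get_drvdata(dev);

	drm_bridge_remove(&mb->bridge);
}

static const struct component_ops my_bridge_component_ops = {
	.bind	= my_bridge_bind,
	.unbind	= my_bridge_unbind,
};

static int my_bridge_probe(struct platform_device *pdev)
{
	struct my_bridge *mb;

	mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
	if (!mb)
		return -ENOMEM;

	platform_set_drvdata(pdev, mb);

	/* DRM registration is deferred to bind(), run when the master binds. */
	return component_add(&pdev->dev, &my_bridge_component_ops);
}

static int my_bridge_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &my_bridge_component_ops);
	return 0;
}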
@@ -42,34 +42,24 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; - int ret; - ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, - DCU_MODE_DCU_MODE_MASK, - DCU_MODE_DCU_MODE(DCU_MODE_OFF)); - if (ret) - dev_err(fsl_dev->dev, "Disable CRTC failed\n"); - ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, - DCU_UPDATE_MODE_READREG); - if (ret) - dev_err(fsl_dev->dev, "Enable CRTC failed\n"); + regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, + DCU_MODE_DCU_MODE_MASK, + DCU_MODE_DCU_MODE(DCU_MODE_OFF)); + regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); } static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; - int ret; - ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, - DCU_MODE_DCU_MODE_MASK, - DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); - if (ret) - dev_err(fsl_dev->dev, "Enable CRTC failed\n"); - ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, - DCU_UPDATE_MODE_READREG); - if (ret) - dev_err(fsl_dev->dev, "Enable CRTC failed\n"); + regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, + DCU_MODE_DCU_MODE_MASK, + DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); + regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); } static bool fsl_dcu_drm_crtc_mode_fixup(struct drm_crtc *crtc, @@ -84,9 +74,8 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; struct drm_display_mode *mode = &crtc->state->mode; - unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index; + unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index, pol = 0; unsigned long dcuclk; - int ret; index = drm_crtc_index(crtc); dcuclk = clk_get_rate(fsl_dev->clk); @@ -100,51 +89,36 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) vfp = mode->vsync_start - mode->vdisplay; vsw = mode->vsync_end - mode->vsync_start; - ret = regmap_write(fsl_dev->regmap, DCU_HSYN_PARA, - DCU_HSYN_PARA_BP(hbp) | - DCU_HSYN_PARA_PW(hsw) | - DCU_HSYN_PARA_FP(hfp)); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_VSYN_PARA, - DCU_VSYN_PARA_BP(vbp) | - DCU_VSYN_PARA_PW(vsw) | - DCU_VSYN_PARA_FP(vfp)); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_DISP_SIZE, - DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) | - DCU_DISP_SIZE_DELTA_X(mode->hdisplay)); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_SYN_POL, - DCU_SYN_POL_INV_VS_LOW | DCU_SYN_POL_INV_HS_LOW); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) | - DCU_BGND_G(0) | DCU_BGND_B(0)); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_DCU_MODE, - DCU_MODE_BLEND_ITER(1) | DCU_MODE_RASTER_EN); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_THRESHOLD, - DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) | - DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) | - DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL)); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, - DCU_UPDATE_MODE_READREG); - if (ret) - goto set_failed; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + pol |= DCU_SYN_POL_INV_HS_LOW; + + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + pol |= DCU_SYN_POL_INV_VS_LOW; + + regmap_write(fsl_dev->regmap, 
DCU_HSYN_PARA, + DCU_HSYN_PARA_BP(hbp) | + DCU_HSYN_PARA_PW(hsw) | + DCU_HSYN_PARA_FP(hfp)); + regmap_write(fsl_dev->regmap, DCU_VSYN_PARA, + DCU_VSYN_PARA_BP(vbp) | + DCU_VSYN_PARA_PW(vsw) | + DCU_VSYN_PARA_FP(vfp)); + regmap_write(fsl_dev->regmap, DCU_DISP_SIZE, + DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) | + DCU_DISP_SIZE_DELTA_X(mode->hdisplay)); + regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div); + regmap_write(fsl_dev->regmap, DCU_SYN_POL, pol); + regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) | + DCU_BGND_G(0) | DCU_BGND_B(0)); + regmap_write(fsl_dev->regmap, DCU_DCU_MODE, + DCU_MODE_BLEND_ITER(1) | DCU_MODE_RASTER_EN); + regmap_write(fsl_dev->regmap, DCU_THRESHOLD, + DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) | + DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) | + DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL)); + regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); return; -set_failed: - dev_err(dev->dev, "set DCU register failed\n"); } static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { @@ -174,10 +148,15 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev) int ret; primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm); + if (!primary) + return -ENOMEM; + ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL, &fsl_dcu_drm_crtc_funcs, NULL); - if (ret < 0) + if (ret) { + primary->funcs->destroy(primary); return ret; + } drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs); @@ -185,26 +164,15 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev) reg_num = LS1021A_LAYER_REG_NUM; else reg_num = VF610_LAYER_REG_NUM; - for (i = 0; i <= fsl_dev->soc->total_layer; i++) { - for (j = 0; j < reg_num; j++) { - ret = regmap_write(fsl_dev->regmap, - DCU_CTRLDESCLN(i, j), 0); - if (ret) - goto init_failed; - } + for (i = 0; i < fsl_dev->soc->total_layer; i++) { + for (j = 1; j <= reg_num; j++) + regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0); } - ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, - DCU_MODE_DCU_MODE_MASK, - DCU_MODE_DCU_MODE(DCU_MODE_OFF)); - if (ret) - goto init_failed; - ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, - DCU_UPDATE_MODE_READREG); - if (ret) - goto init_failed; + regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, + DCU_MODE_DCU_MODE_MASK, + DCU_MODE_DCU_MODE(DCU_MODE_OFF)); + regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); return 0; -init_failed: - dev_err(fsl_dev->dev, "init DCU register failed\n"); - return ret; } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index fca97d3fc846..e8d9337a66d8 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -28,37 +28,36 @@ #include "fsl_dcu_drm_crtc.h" #include "fsl_dcu_drm_drv.h" +static bool fsl_dcu_drm_is_volatile_reg(struct device *dev, unsigned int reg) +{ + if (reg == DCU_INT_STATUS || reg == DCU_UPDATE_MODE) + return true; + + return false; +} + static const struct regmap_config fsl_dcu_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, .cache_type = REGCACHE_RBTREE, + + .volatile_reg = fsl_dcu_drm_is_volatile_reg, }; static int fsl_dcu_drm_irq_init(struct drm_device *dev) { struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; - unsigned int value; int ret; ret = drm_irq_install(dev, fsl_dev->irq); if (ret < 0) dev_err(dev->dev, "failed to install IRQ handler\n"); - ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0); - if (ret) - dev_err(dev->dev, "set DCU_INT_STATUS 
failed\n"); - ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value); - if (ret) - dev_err(dev->dev, "read DCU_INT_MASK failed\n"); - value &= DCU_INT_MASK_VBLANK; - ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value); - if (ret) - dev_err(dev->dev, "set DCU_INT_MASK failed\n"); - ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, - DCU_UPDATE_MODE_READREG); - if (ret) - dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n"); + regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0); + regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0); + regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); return ret; } @@ -112,10 +111,6 @@ static int fsl_dcu_unload(struct drm_device *dev) return 0; } -static void fsl_dcu_drm_preclose(struct drm_device *dev, struct drm_file *file) -{ -} - static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg) { struct drm_device *dev = arg; @@ -124,18 +119,17 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg) int ret; ret = regmap_read(fsl_dev->regmap, DCU_INT_STATUS, &int_status); - if (ret) - dev_err(dev->dev, "set DCU_INT_STATUS failed\n"); + if (ret) { + dev_err(dev->dev, "read DCU_INT_STATUS failed\n"); + return IRQ_NONE; + } + if (int_status & DCU_INT_STATUS_VBLANK) drm_handle_vblank(dev, 0); - ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0xffffffff); - if (ret) - dev_err(dev->dev, "set DCU_INT_STATUS failed\n"); - ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, - DCU_UPDATE_MODE_READREG); - if (ret) - dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n"); + regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status); + regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); return IRQ_HANDLED; } @@ -144,15 +138,11 @@ static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, unsigned int pipe) { struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; unsigned int value; - int ret; - ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value); - if (ret) - dev_err(dev->dev, "read DCU_INT_MASK failed\n"); + regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value); value &= ~DCU_INT_MASK_VBLANK; - ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value); - if (ret) - dev_err(dev->dev, "set DCU_INT_MASK failed\n"); + regmap_write(fsl_dev->regmap, DCU_INT_MASK, value); + return 0; } @@ -161,15 +151,10 @@ static void fsl_dcu_drm_disable_vblank(struct drm_device *dev, { struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; unsigned int value; - int ret; - ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value); - if (ret) - dev_err(dev->dev, "read DCU_INT_MASK failed\n"); + regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value); value |= DCU_INT_MASK_VBLANK; - ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value); - if (ret) - dev_err(dev->dev, "set DCU_INT_MASK failed\n"); + regmap_write(fsl_dev->regmap, DCU_INT_MASK, value); } static const struct file_operations fsl_dcu_drm_fops = { @@ -191,7 +176,6 @@ static struct drm_driver fsl_dcu_drm_driver = { | DRIVER_PRIME | DRIVER_ATOMIC, .load = fsl_dcu_load, .unload = fsl_dcu_unload, - .preclose = fsl_dcu_drm_preclose, .irq_handler = fsl_dcu_drm_irq, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = fsl_dcu_drm_enable_vblank, diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h index 579b9e44e764..6413ac9e4769 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h @@ -133,7 +133,9 @@ #define DCU_LAYER_RLE_EN BIT(15) #define DCU_LAYER_LUOFFS(x) ((x) << 4) #define DCU_LAYER_BB_ON 
BIT(2) -#define DCU_LAYER_AB(x) (x) +#define DCU_LAYER_AB_NONE 0 +#define DCU_LAYER_AB_CHROMA_KEYING 1 +#define DCU_LAYER_AB_WHOLE_FRAME 2 #define DCU_LAYER_CKMAX_R(x) ((x) << 16) #define DCU_LAYER_CKMAX_G(x) ((x) << 8) diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c index 0ef5959710e7..c564ec612b59 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c @@ -25,6 +25,8 @@ static const struct drm_mode_config_funcs fsl_dcu_drm_mode_config_funcs = { int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev) { + int ret; + drm_mode_config_init(fsl_dev->drm); fsl_dev->drm->mode_config.min_width = 0; @@ -33,11 +35,25 @@ int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev) fsl_dev->drm->mode_config.max_height = 2047; fsl_dev->drm->mode_config.funcs = &fsl_dcu_drm_mode_config_funcs; - drm_kms_helper_poll_init(fsl_dev->drm); - fsl_dcu_drm_crtc_create(fsl_dev); - fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc); - fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder); + ret = fsl_dcu_drm_crtc_create(fsl_dev); + if (ret) + return ret; + + ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc); + if (ret) + goto fail_encoder; + + ret = fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder); + if (ret) + goto fail_connector; + drm_mode_config_reset(fsl_dev->drm); + drm_kms_helper_poll_init(fsl_dev->drm); return 0; +fail_encoder: + fsl_dev->crtc.funcs->destroy(&fsl_dev->crtc); +fail_connector: + fsl_dev->encoder.funcs->destroy(&fsl_dev->encoder); + return ret; } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 4b13cf919575..274558b3b32b 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -41,11 +41,17 @@ static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane, { struct drm_framebuffer *fb = state->fb; + if (!state->fb || !state->crtc) + return 0; + switch (fb->pixel_format) { case DRM_FORMAT_RGB565: case DRM_FORMAT_RGB888: + case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_XRGB1555: case DRM_FORMAT_ARGB1555: case DRM_FORMAT_YUV422: return 0; @@ -59,19 +65,15 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane, { struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; unsigned int value; - int index, ret; + int index; index = fsl_dcu_drm_plane_index(plane); if (index < 0) return; - ret = regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value); - if (ret) - dev_err(fsl_dev->dev, "read DCU_INT_MASK failed\n"); + regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value); value &= ~DCU_LAYER_EN; - ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value); - if (ret) - dev_err(fsl_dev->dev, "set DCU register failed\n"); + regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value); } static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, @@ -82,8 +84,8 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *state = plane->state; struct drm_framebuffer *fb = plane->state->fb; struct drm_gem_cma_object *gem; - unsigned int alpha, bpp; - int index, ret; + unsigned int alpha = DCU_LAYER_AB_NONE, bpp; + int index; if (!fb) return; @@ -97,96 +99,74 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, switch (fb->pixel_format) { case 
DRM_FORMAT_RGB565: bpp = FSL_DCU_RGB565; - alpha = 0xff; break; case DRM_FORMAT_RGB888: bpp = FSL_DCU_RGB888; - alpha = 0xff; break; case DRM_FORMAT_ARGB8888: + alpha = DCU_LAYER_AB_WHOLE_FRAME; + /* fall-through */ + case DRM_FORMAT_XRGB8888: bpp = FSL_DCU_ARGB8888; - alpha = 0xff; break; - case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_ARGB4444: + alpha = DCU_LAYER_AB_WHOLE_FRAME; + /* fall-through */ + case DRM_FORMAT_XRGB4444: bpp = FSL_DCU_ARGB4444; - alpha = 0xff; break; case DRM_FORMAT_ARGB1555: + alpha = DCU_LAYER_AB_WHOLE_FRAME; + /* fall-through */ + case DRM_FORMAT_XRGB1555: bpp = FSL_DCU_ARGB1555; - alpha = 0xff; break; case DRM_FORMAT_YUV422: bpp = FSL_DCU_YUV422; - alpha = 0xff; break; default: return; } - ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1), - DCU_LAYER_HEIGHT(state->crtc_h) | - DCU_LAYER_WIDTH(state->crtc_w)); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2), - DCU_LAYER_POSY(state->crtc_y) | - DCU_LAYER_POSX(state->crtc_x)); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, - DCU_CTRLDESCLN(index, 3), gem->paddr); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), - DCU_LAYER_EN | - DCU_LAYER_TRANS(alpha) | - DCU_LAYER_BPP(bpp) | - DCU_LAYER_AB(0)); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5), - DCU_LAYER_CKMAX_R(0xFF) | - DCU_LAYER_CKMAX_G(0xFF) | - DCU_LAYER_CKMAX_B(0xFF)); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6), - DCU_LAYER_CKMIN_R(0) | - DCU_LAYER_CKMIN_G(0) | - DCU_LAYER_CKMIN_B(0)); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8), - DCU_LAYER_FG_FCOLOR(0)); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9), - DCU_LAYER_BG_BCOLOR(0)); - if (ret) - goto set_failed; + regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1), + DCU_LAYER_HEIGHT(state->crtc_h) | + DCU_LAYER_WIDTH(state->crtc_w)); + regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2), + DCU_LAYER_POSY(state->crtc_y) | + DCU_LAYER_POSX(state->crtc_x)); + regmap_write(fsl_dev->regmap, + DCU_CTRLDESCLN(index, 3), gem->paddr); + regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), + DCU_LAYER_EN | + DCU_LAYER_TRANS(0xff) | + DCU_LAYER_BPP(bpp) | + alpha); + regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5), + DCU_LAYER_CKMAX_R(0xFF) | + DCU_LAYER_CKMAX_G(0xFF) | + DCU_LAYER_CKMAX_B(0xFF)); + regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6), + DCU_LAYER_CKMIN_R(0) | + DCU_LAYER_CKMIN_G(0) | + DCU_LAYER_CKMIN_B(0)); + regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0); + regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8), + DCU_LAYER_FG_FCOLOR(0)); + regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9), + DCU_LAYER_BG_BCOLOR(0)); + if (!strcmp(fsl_dev->soc->name, "ls1021a")) { - ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 10), - DCU_LAYER_POST_SKIP(0) | - DCU_LAYER_PRE_SKIP(0)); - if (ret) - goto set_failed; + regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 10), + DCU_LAYER_POST_SKIP(0) | + DCU_LAYER_PRE_SKIP(0)); } - ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, - DCU_MODE_DCU_MODE_MASK, - DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); - if (ret) - goto set_failed; - ret = regmap_write(fsl_dev->regmap, - DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); - if (ret) 
- goto set_failed; - return; + regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, + DCU_MODE_DCU_MODE_MASK, + DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); + regmap_write(fsl_dev->regmap, + DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); -set_failed: - dev_err(fsl_dev->dev, "set DCU register failed\n"); + return; } static void @@ -213,6 +193,7 @@ static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = { static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane) { drm_plane_cleanup(plane); + kfree(plane); } static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = { @@ -227,8 +208,11 @@ static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = { static const u32 fsl_dcu_drm_plane_formats[] = { DRM_FORMAT_RGB565, DRM_FORMAT_RGB888, + DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB4444, DRM_FORMAT_ARGB4444, + DRM_FORMAT_XRGB1555, DRM_FORMAT_ARGB1555, DRM_FORMAT_YUV422, }; diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index d0717a85c7ec..b837e7a92196 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -217,7 +217,6 @@ static int cdv_intel_crt_set_property(struct drm_connector *connector, static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = { .dpms = cdv_intel_crt_dpms, - .mode_fixup = gma_encoder_mode_fixup, .prepare = gma_encoder_prepare, .commit = gma_encoder_commit, .mode_set = cdv_intel_crt_mode_set, diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index ddf2d7700759..28f9d90988ff 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -255,7 +255,6 @@ static void cdv_hdmi_destroy(struct drm_connector *connector) static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = { .dpms = cdv_hdmi_dpms, - .mode_fixup = gma_encoder_mode_fixup, .prepare = gma_encoder_prepare, .mode_set = cdv_hdmi_mode_set, .commit = gma_encoder_commit, diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index cb95765050cc..033d894d030e 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -674,29 +674,17 @@ static const struct drm_mode_config_funcs psb_mode_funcs = { .output_poll_changed = psbfb_output_poll_changed, }; -static int psb_create_backlight_property(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = dev->dev_private; - struct drm_property *backlight; - - if (dev_priv->backlight_property) - return 0; - - backlight = drm_property_create_range(dev, 0, "backlight", 0, 100); - - dev_priv->backlight_property = backlight; - - return 0; -} - static void psb_setup_outputs(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct drm_connector *connector; drm_mode_create_scaling_mode_property(dev); - psb_create_backlight_property(dev); + /* It is ok for this to fail - we just don't get backlight control */ + if (!dev_priv->backlight_property) + dev_priv->backlight_property = drm_property_create_range(dev, 0, + "backlight", 0, 100); dev_priv->ops->output_init(dev); list_for_each_entry(connector, &dev->mode_config.connector_list, diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index ff17af4cfc64..927082148d4d 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -478,13 +478,6 @@ int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) return 0; } -bool 
gma_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - bool gma_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h index ed569d8a6af3..78b9f986a6e5 100644 --- a/drivers/gpu/drm/gma500/gma_display.h +++ b/drivers/gpu/drm/gma500/gma_display.h @@ -90,9 +90,6 @@ extern void gma_crtc_restore(struct drm_crtc *crtc); extern void gma_encoder_prepare(struct drm_encoder *encoder); extern void gma_encoder_commit(struct drm_encoder *encoder); extern void gma_encoder_destroy(struct drm_encoder *encoder); -extern bool gma_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); /* Common clock related functions */ extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk); diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c index 566d330aaeea..e7e22187c539 100644 --- a/drivers/gpu/drm/gma500/intel_gmbus.c +++ b/drivers/gpu/drm/gma500/intel_gmbus.c @@ -436,7 +436,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev) return 0; err: - while (--i) { + while (i--) { struct intel_gmbus *bus = &dev_priv->gmbus[i]; i2c_del_adapter(&bus->adapter); } diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c index d758f4cc6805..907cb51795c3 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -382,16 +382,6 @@ static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode) -{ - if (mode == connector->dpms) - return; - - /*first, execute dpms*/ - - drm_helper_connector_dpms(connector, mode); -} - static struct drm_encoder *mdfld_dsi_connector_best_encoder( struct drm_connector *connector) { @@ -404,7 +394,7 @@ static struct drm_encoder *mdfld_dsi_connector_best_encoder( /*DSI connector funcs*/ static const struct drm_connector_funcs mdfld_dsi_connector_funcs = { - .dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms, + .dpms = drm_helper_connector_dpms, .detect = mdfld_dsi_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = mdfld_dsi_connector_set_property, diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index 2d18499d6060..8b2eb32ee988 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -601,7 +601,6 @@ static void oaktrail_hdmi_destroy(struct drm_connector *connector) static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = { .dpms = oaktrail_hdmi_dpms, - .mode_fixup = gma_encoder_mode_fixup, .prepare = gma_encoder_prepare, .mode_set = oaktrail_hdmi_mode_set, .commit = gma_encoder_commit, diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 92e7e5795398..4e1c6850520e 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -442,14 +442,6 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd, /* FIXME: do we need to wrap the other side of this */ } -/* - * When a client dies: - * - Check for and clean up flipped page state - */ -static void psb_driver_preclose(struct drm_device *dev, struct drm_file 
*priv) -{ -} - static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { return drm_get_pci_dev(pdev, ent, &driver); @@ -495,7 +487,6 @@ static struct drm_driver driver = { .load = psb_driver_load, .unload = psb_driver_unload, .lastclose = psb_driver_lastclose, - .preclose = psb_driver_preclose, .set_busid = drm_pci_set_busid, .num_ioctls = ARRAY_SIZE(psb_ioctls), diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c index 90db5f4dcce5..0594c45f7164 100644 --- a/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/drivers/gpu/drm/i2c/ch7006_drv.c @@ -253,6 +253,8 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder, drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names); priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2); + if (!priv->scale_property) + return -ENOMEM; drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property, priv->select_subconnector); diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c index c400428f6c8c..db0b03fb0ff1 100644 --- a/drivers/gpu/drm/i2c/sil164_drv.c +++ b/drivers/gpu/drm/i2c/sil164_drv.c @@ -252,14 +252,6 @@ sil164_encoder_restore(struct drm_encoder *encoder) priv->saved_slave_state); } -static bool -sil164_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static int sil164_encoder_mode_valid(struct drm_encoder *encoder, struct drm_display_mode *mode) @@ -347,7 +339,6 @@ static const struct drm_encoder_slave_funcs sil164_encoder_funcs = { .dpms = sil164_encoder_dpms, .save = sil164_encoder_save, .restore = sil164_encoder_restore, - .mode_fixup = sil164_encoder_mode_fixup, .mode_valid = sil164_encoder_mode_valid, .mode_set = sil164_encoder_mode_set, .detect = sil164_encoder_detect, diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 34e38749a817..b61282d89aa3 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -856,14 +856,6 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode) priv->dpms = mode; } -static bool -tda998x_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static int tda998x_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { @@ -1343,7 +1335,6 @@ static void tda998x_encoder_commit(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = { .dpms = tda998x_encoder_dpms, - .mode_fixup = tda998x_encoder_mode_fixup, .prepare = tda998x_encoder_prepare, .commit = tda998x_encoder_commit, .mode_set = tda998x_encoder_mode_set, diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 051eab33e4c7..4c59793c4ccb 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -2,9 +2,7 @@ config DRM_I915 tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics" depends on DRM depends on X86 && PCI - depends on (AGP || AGP=n) select INTEL_GTT - select AGP_INTEL if AGP select INTERVAL_TREE # we need shmfs for the swappable backing store, and in particular # the shmem_readpage() which depends upon tmpfs diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 0fc38bb7276c..ec0c2a05eed6 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ 
-1331,7 +1331,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) struct intel_engine_cs *ring; u64 acthd[I915_NUM_RINGS]; u32 seqno[I915_NUM_RINGS]; - int i; + u32 instdone[I915_NUM_INSTDONE_REG]; + int i, j; if (!i915.enable_hangcheck) { seq_printf(m, "Hangcheck disabled\n"); @@ -1345,6 +1346,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) acthd[i] = intel_ring_get_active_head(ring); } + i915_get_extra_instdone(dev, instdone); + intel_runtime_pm_put(dev_priv); if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) { @@ -1365,6 +1368,21 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) (long long)ring->hangcheck.max_acthd); seq_printf(m, "\tscore = %d\n", ring->hangcheck.score); seq_printf(m, "\taction = %d\n", ring->hangcheck.action); + + if (ring->id == RCS) { + seq_puts(m, "\tinstdone read ="); + + for (j = 0; j < I915_NUM_INSTDONE_REG; j++) + seq_printf(m, " 0x%08x", instdone[j]); + + seq_puts(m, "\n\tinstdone accu ="); + + for (j = 0; j < I915_NUM_INSTDONE_REG; j++) + seq_printf(m, " 0x%08x", + ring->hangcheck.instdone[j]); + + seq_puts(m, "\n"); + } } return 0; @@ -1942,11 +1960,8 @@ static int i915_context_status(struct seq_file *m, void *unused) seq_puts(m, "HW context "); describe_ctx(m, ctx); - for_each_ring(ring, dev_priv, i) { - if (ring->default_context == ctx) - seq_printf(m, "(default context %s) ", - ring->name); - } + if (ctx == dev_priv->kernel_context) + seq_printf(m, "(kernel context) "); if (i915.enable_execlists) { seq_putc(m, '\n'); @@ -1976,12 +1991,13 @@ static int i915_context_status(struct seq_file *m, void *unused) } static void i915_dump_lrc_obj(struct seq_file *m, - struct intel_engine_cs *ring, - struct drm_i915_gem_object *ctx_obj) + struct intel_context *ctx, + struct intel_engine_cs *ring) { struct page *page; uint32_t *reg_state; int j; + struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; unsigned long ggtt_offset = 0; if (ctx_obj == NULL) { @@ -1991,7 +2007,7 @@ static void i915_dump_lrc_obj(struct seq_file *m, } seq_printf(m, "CONTEXT: %s %u\n", ring->name, - intel_execlists_ctx_id(ctx_obj)); + intel_execlists_ctx_id(ctx, ring)); if (!i915_gem_obj_ggtt_bound(ctx_obj)) seq_puts(m, "\tNot bound in GGTT\n"); @@ -2037,13 +2053,10 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) if (ret) return ret; - list_for_each_entry(ctx, &dev_priv->context_list, link) { - for_each_ring(ring, dev_priv, i) { - if (ring->default_context != ctx) - i915_dump_lrc_obj(m, ring, - ctx->engine[i].state); - } - } + list_for_each_entry(ctx, &dev_priv->context_list, link) + if (ctx != dev_priv->kernel_context) + for_each_ring(ring, dev_priv, i) + i915_dump_lrc_obj(m, ctx, ring); mutex_unlock(&dev->struct_mutex); @@ -2092,13 +2105,13 @@ static int i915_execlists(struct seq_file *m, void *data) seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer); read_pointer = ring->next_context_status_buffer; - write_pointer = status_pointer & 0x07; + write_pointer = GEN8_CSB_WRITE_PTR(status_pointer); if (read_pointer > write_pointer) - write_pointer += 6; + write_pointer += GEN8_CSB_ENTRIES; seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n", read_pointer, write_pointer); - for (i = 0; i < 6; i++) { + for (i = 0; i < GEN8_CSB_ENTRIES; i++) { status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i)); ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i)); @@ -2115,11 +2128,8 @@ static int i915_execlists(struct seq_file *m, void *data) seq_printf(m, "\t%d requests in queue\n", count); 
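In the i915_execlists() debugfs hunk above, the raw 0x07 mask and the constant 6 are replaced by GEN8_CSB_WRITE_PTR() and GEN8_CSB_ENTRIES, and the write pointer is unwrapped by one full ring of entries whenever it has already lapped the read pointer. A small, self-contained illustration of that wraparound arithmetic; the entry count and mask below simply mirror the values visible in the diff rather than quoting i915_reg.h:

#include <stdio.h>

#define CSB_ENTRIES		6		/* entries in the context status ring */
#define CSB_WRITE_PTR(x)	((x) & 0x07)	/* low bits hold the write index */

/* Visit every entry between the read and write pointers, oldest first. */
static void walk_csb(unsigned int status_pointer, unsigned int read_pointer)
{
	unsigned int write_pointer = CSB_WRITE_PTR(status_pointer);
	unsigned int i;

	if (read_pointer > write_pointer)
		write_pointer += CSB_ENTRIES;	/* unwrap past the end of the ring */

	for (i = read_pointer + 1; i <= write_pointer; i++)
		printf("consume CSB slot %u\n", i % CSB_ENTRIES);
}

int main(void)
{
	/* read index 4, hardware write index 2: the walk wraps through 5, 0, 1, 2. */
	walk_csb(0x02, 4);
	return 0;
}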
if (head_req) { - struct drm_i915_gem_object *ctx_obj; - - ctx_obj = head_req->ctx->engine[ring_id].state; seq_printf(m, "\tHead request id: %u\n", - intel_execlists_ctx_id(ctx_obj)); + intel_execlists_ctx_id(head_req->ctx, ring)); seq_printf(m, "\tHead request tail: %u\n", head_req->tail); } @@ -2453,9 +2463,9 @@ static void i915_guc_client_info(struct seq_file *m, for_each_ring(ring, dev_priv, i) { seq_printf(m, "\tSubmissions: %llu %s\n", - client->submissions[i], + client->submissions[ring->guc_id], ring->name); - tot += client->submissions[i]; + tot += client->submissions[ring->guc_id]; } seq_printf(m, "\tTotal: %llu\n", tot); } @@ -2492,10 +2502,10 @@ static int i915_guc_info(struct seq_file *m, void *data) seq_printf(m, "\nGuC submissions:\n"); for_each_ring(ring, dev_priv, i) { - seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n", - ring->name, guc.submissions[i], - guc.last_seqno[i], guc.last_seqno[i]); - total += guc.submissions[i]; + seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", + ring->name, guc.submissions[ring->guc_id], + guc.last_seqno[ring->guc_id]); + total += guc.submissions[ring->guc_id]; } seq_printf(m, "\t%s: %llu\n", "Total", total); @@ -2573,6 +2583,10 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) enabled = true; } } + + seq_printf(m, "Main link in standby mode: %s\n", + yesno(dev_priv->psr.link_standby)); + seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); if (!HAS_DDI(dev)) @@ -3211,9 +3225,11 @@ static int i915_wa_registers(struct seq_file *m, void *unused) { int i; int ret; + struct intel_engine_cs *ring; struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_workarounds *workarounds = &dev_priv->workarounds; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) @@ -3221,15 +3237,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); - seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count); - for (i = 0; i < dev_priv->workarounds.count; ++i) { + seq_printf(m, "Workarounds applied: %d\n", workarounds->count); + for_each_ring(ring, dev_priv, i) + seq_printf(m, "HW whitelist count for %s: %d\n", + ring->name, workarounds->hw_whitelist_count[i]); + for (i = 0; i < workarounds->count; ++i) { i915_reg_t addr; u32 mask, value, read; bool ok; - addr = dev_priv->workarounds.reg[i].addr; - mask = dev_priv->workarounds.reg[i].mask; - value = dev_priv->workarounds.reg[i].value; + addr = workarounds->reg[i].addr; + mask = workarounds->reg[i].mask; + value = workarounds->reg[i].value; read = I915_READ(addr); ok = (value & mask) == (read & mask); seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index d70d96fe553b..2df2fac04708 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -391,20 +391,13 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_vga_client; - /* Initialise stolen first so that we may reserve preallocated - * objects for the BIOS to KMS transition. 
- */ - ret = i915_gem_init_stolen(dev); - if (ret) - goto cleanup_vga_switcheroo; - intel_power_domains_init_hw(dev_priv, false); intel_csr_ucode_init(dev_priv); ret = intel_irq_install(dev_priv); if (ret) - goto cleanup_gem_stolen; + goto cleanup_csr; intel_setup_gmbus(dev); @@ -451,16 +444,15 @@ static int i915_load_modeset_init(struct drm_device *dev) cleanup_gem: mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_ringbuffer(dev); i915_gem_context_fini(dev); + i915_gem_cleanup_engines(dev); mutex_unlock(&dev->struct_mutex); cleanup_irq: intel_guc_ucode_fini(dev); drm_irq_uninstall(dev); intel_teardown_gmbus(dev); -cleanup_gem_stolen: - i915_gem_cleanup_stolen(dev); -cleanup_vga_switcheroo: +cleanup_csr: + intel_csr_ucode_fini(dev_priv); vga_switcheroo_unregister_client(dev->pdev); cleanup_vga_client: vga_client_register(dev->pdev, NULL, NULL, NULL); @@ -816,7 +808,41 @@ static void intel_device_info_runtime_init(struct drm_device *dev) !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { DRM_INFO("Display fused off, disabling\n"); info->num_pipes = 0; + } else if (fuse_strap & IVB_PIPE_C_DISABLE) { + DRM_INFO("PipeC fused off\n"); + info->num_pipes -= 1; } + } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) { + u32 dfsm = I915_READ(SKL_DFSM); + u8 disabled_mask = 0; + bool invalid; + int num_bits; + + if (dfsm & SKL_DFSM_PIPE_A_DISABLE) + disabled_mask |= BIT(PIPE_A); + if (dfsm & SKL_DFSM_PIPE_B_DISABLE) + disabled_mask |= BIT(PIPE_B); + if (dfsm & SKL_DFSM_PIPE_C_DISABLE) + disabled_mask |= BIT(PIPE_C); + + num_bits = hweight8(disabled_mask); + + switch (disabled_mask) { + case BIT(PIPE_A): + case BIT(PIPE_B): + case BIT(PIPE_A) | BIT(PIPE_B): + case BIT(PIPE_A) | BIT(PIPE_C): + invalid = true; + break; + default: + invalid = false; + } + + if (num_bits > info->num_pipes || invalid) + DRM_ERROR("invalid pipe fuse configuration: 0x%x\n", + disabled_mask); + else + info->num_pipes -= num_bits; } /* Initialize slice/subslice/EU info */ @@ -855,6 +881,94 @@ static void intel_init_dpio(struct drm_i915_private *dev_priv) } } +static int i915_workqueues_init(struct drm_i915_private *dev_priv) +{ + /* + * The i915 workqueue is primarily used for batched retirement of + * requests (and thus managing bo) once the task has been completed + * by the GPU. i915_gem_retire_requests() is called directly when we + * need high-priority retirement, such as waiting for an explicit + * bo. + * + * It is also used for periodic low-priority events, such as + * idle-timers and recording error state. + * + * All tasks on the workqueue are expected to acquire the dev mutex + * so there is no point in running more than one instance of the + * workqueue at any time. Use an ordered one. 
+ */ + dev_priv->wq = alloc_ordered_workqueue("i915", 0); + if (dev_priv->wq == NULL) + goto out_err; + + dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); + if (dev_priv->hotplug.dp_wq == NULL) + goto out_free_wq; + + dev_priv->gpu_error.hangcheck_wq = + alloc_ordered_workqueue("i915-hangcheck", 0); + if (dev_priv->gpu_error.hangcheck_wq == NULL) + goto out_free_dp_wq; + + return 0; + +out_free_dp_wq: + destroy_workqueue(dev_priv->hotplug.dp_wq); +out_free_wq: + destroy_workqueue(dev_priv->wq); +out_err: + DRM_ERROR("Failed to allocate workqueues.\n"); + + return -ENOMEM; +} + +static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) +{ + destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); + destroy_workqueue(dev_priv->hotplug.dp_wq); + destroy_workqueue(dev_priv->wq); +} + +static int i915_mmio_setup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + int mmio_bar; + int mmio_size; + + mmio_bar = IS_GEN2(dev) ? 1 : 0; + /* + * Before gen4, the registers and the GTT are behind different BARs. + * However, from gen4 onwards, the registers and the GTT are shared + * in the same BAR, so we want to restrict this ioremap from + * clobbering the GTT which we want ioremap_wc instead. Fortunately, + * the register BAR remains the same size for all the earlier + * generations up to Ironlake. + */ + if (INTEL_INFO(dev)->gen < 5) + mmio_size = 512 * 1024; + else + mmio_size = 2 * 1024 * 1024; + dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); + if (dev_priv->regs == NULL) { + DRM_ERROR("failed to map registers\n"); + + return -EIO; + } + + /* Try to make sure MCHBAR is enabled before poking at it */ + intel_setup_mchbar(dev); + + return 0; +} + +static void i915_mmio_cleanup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + intel_teardown_mchbar(dev); + pci_iounmap(dev->pdev, dev_priv->regs); +} + /** * i915_driver_load - setup chip and create an initial config * @dev: DRM device @@ -870,7 +984,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) { struct drm_i915_private *dev_priv; struct intel_device_info *info, *device_info; - int ret = 0, mmio_bar, mmio_size; + int ret = 0; uint32_t aperture_size; info = (struct intel_device_info *) flags; @@ -897,6 +1011,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) mutex_init(&dev_priv->modeset_restore_lock); mutex_init(&dev_priv->av_mutex); + ret = i915_workqueues_init(dev_priv); + if (ret < 0) + goto out_free_priv; + intel_pm_setup(dev); intel_runtime_pm_get(dev_priv); @@ -915,28 +1033,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (i915_get_bridge_dev(dev)) { ret = -EIO; - goto free_priv; + goto out_runtime_pm_put; } - mmio_bar = IS_GEN2(dev) ? 1 : 0; - /* Before gen4, the registers and the GTT are behind different BARs. - * However, from gen4 onwards, the registers and the GTT are shared - * in the same BAR, so we want to restrict this ioremap from - * clobbering the GTT which we want ioremap_wc instead. Fortunately, - * the register BAR remains the same size for all the earlier - * generations up to Ironlake. 
- */ - if (info->gen < 5) - mmio_size = 512*1024; - else - mmio_size = 2*1024*1024; - - dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); - if (!dev_priv->regs) { - DRM_ERROR("failed to map registers\n"); - ret = -EIO; + ret = i915_mmio_setup(dev); + if (ret < 0) goto put_bridge; - } /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev); @@ -945,7 +1047,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ret = i915_gem_gtt_init(dev); if (ret) - goto out_freecsr; + goto out_uncore_fini; /* WARNING: Apparently we must kick fbdev drivers before vgacon, * otherwise the vga fbdev driver falls over. */ @@ -991,49 +1093,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, aperture_size); - /* The i915 workqueue is primarily used for batched retirement of - * requests (and thus managing bo) once the task has been completed - * by the GPU. i915_gem_retire_requests() is called directly when we - * need high-priority retirement, such as waiting for an explicit - * bo. - * - * It is also used for periodic low-priority events, such as - * idle-timers and recording error state. - * - * All tasks on the workqueue are expected to acquire the dev mutex - * so there is no point in running more than one instance of the - * workqueue at any time. Use an ordered one. - */ - dev_priv->wq = alloc_ordered_workqueue("i915", 0); - if (dev_priv->wq == NULL) { - DRM_ERROR("Failed to create our workqueue.\n"); - ret = -ENOMEM; - goto out_mtrrfree; - } - - dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); - if (dev_priv->hotplug.dp_wq == NULL) { - DRM_ERROR("Failed to create our dp workqueue.\n"); - ret = -ENOMEM; - goto out_freewq; - } - - dev_priv->gpu_error.hangcheck_wq = - alloc_ordered_workqueue("i915-hangcheck", 0); - if (dev_priv->gpu_error.hangcheck_wq == NULL) { - DRM_ERROR("Failed to create our hangcheck workqueue.\n"); - ret = -ENOMEM; - goto out_freedpwq; - } - intel_irq_init(dev_priv); intel_uncore_sanitize(dev); - /* Try to make sure MCHBAR is enabled before poking at it */ - intel_setup_mchbar(dev); intel_opregion_setup(dev); - i915_gem_load(dev); + i915_gem_load_init(dev); + i915_gem_shrinker_init(dev_priv); /* On the 945G/GM, the chipset reports the MSI capability on the * integrated graphics even though the support isn't actually there @@ -1046,8 +1112,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) * be lost or delayed, but we use them anyways to avoid * stuck interrupts on some machines. 
*/ - if (!IS_I945G(dev) && !IS_I945GM(dev)) - pci_enable_msi(dev->pdev); + if (!IS_I945G(dev) && !IS_I945GM(dev)) { + if (pci_enable_msi(dev->pdev) < 0) + DRM_DEBUG_DRIVER("can't enable MSI"); + } intel_device_info_runtime_init(dev); @@ -1097,38 +1165,29 @@ out_power_well: intel_power_domains_fini(dev_priv); drm_vblank_cleanup(dev); out_gem_unload: - WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); - unregister_shrinker(&dev_priv->mm.shrinker); + i915_gem_shrinker_cleanup(dev_priv); if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); intel_teardown_mchbar(dev); pm_qos_remove_request(&dev_priv->pm_qos); - destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); -out_freedpwq: - destroy_workqueue(dev_priv->hotplug.dp_wq); -out_freewq: - destroy_workqueue(dev_priv->wq); -out_mtrrfree: arch_phys_wc_del(dev_priv->gtt.mtrr); io_mapping_free(dev_priv->gtt.mappable); out_gtt: i915_global_gtt_cleanup(dev); -out_freecsr: - intel_csr_ucode_fini(dev_priv); +out_uncore_fini: intel_uncore_fini(dev); - pci_iounmap(dev->pdev, dev_priv->regs); + i915_mmio_cleanup(dev); put_bridge: pci_dev_put(dev_priv->bridge_dev); -free_priv: - kmem_cache_destroy(dev_priv->requests); - kmem_cache_destroy(dev_priv->vmas); - kmem_cache_destroy(dev_priv->objects); - + i915_gem_load_cleanup(dev); +out_runtime_pm_put: intel_runtime_pm_put(dev_priv); - + i915_workqueues_cleanup(dev_priv); +out_free_priv: kfree(dev_priv); + return ret; } @@ -1153,8 +1212,7 @@ int i915_driver_unload(struct drm_device *dev) i915_teardown_sysfs(dev); - WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); - unregister_shrinker(&dev_priv->mm.shrinker); + i915_gem_shrinker_cleanup(dev_priv); io_mapping_free(dev_priv->gtt.mappable); arch_phys_wc_del(dev_priv->gtt.mtrr); @@ -1182,6 +1240,8 @@ int i915_driver_unload(struct drm_device *dev) vga_switcheroo_unregister_client(dev->pdev); vga_client_register(dev->pdev, NULL, NULL, NULL); + intel_csr_ucode_fini(dev_priv); + /* Free error state after interrupts are fully disabled. 
*/ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); i915_destroy_error_state(dev); @@ -1196,31 +1256,21 @@ int i915_driver_unload(struct drm_device *dev) intel_guc_ucode_fini(dev); mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_ringbuffer(dev); i915_gem_context_fini(dev); + i915_gem_cleanup_engines(dev); mutex_unlock(&dev->struct_mutex); intel_fbc_cleanup_cfb(dev_priv); - i915_gem_cleanup_stolen(dev); - intel_csr_ucode_fini(dev_priv); - - intel_teardown_mchbar(dev); - - destroy_workqueue(dev_priv->hotplug.dp_wq); - destroy_workqueue(dev_priv->wq); - destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); pm_qos_remove_request(&dev_priv->pm_qos); i915_global_gtt_cleanup(dev); intel_uncore_fini(dev); - if (dev_priv->regs != NULL) - pci_iounmap(dev->pdev, dev_priv->regs); + i915_mmio_cleanup(dev); - kmem_cache_destroy(dev_priv->requests); - kmem_cache_destroy(dev_priv->vmas); - kmem_cache_destroy(dev_priv->objects); + i915_gem_load_cleanup(dev); pci_dev_put(dev_priv->bridge_dev); + i915_workqueues_cleanup(dev_priv); kfree(dev_priv); return 0; @@ -1261,8 +1311,6 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) i915_gem_context_close(dev, file); i915_gem_release(dev, file); mutex_unlock(&dev->struct_mutex); - - intel_modeset_preclose(dev, file); } void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f357058c74d9..44912ecebc1a 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -35,9 +35,12 @@ #include "i915_trace.h" #include "intel_drv.h" +#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/module.h> #include <linux/pm_runtime.h> +#include <linux/vgaarb.h> +#include <linux/vga_switcheroo.h> #include <drm/drm_crtc_helper.h> static struct drm_driver driver; @@ -969,6 +972,15 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (PCI_FUNC(pdev->devfn)) return -ENODEV; + /* + * apple-gmux is needed on dual GPU MacBook Pro + * to probe the panel if we're the inactive GPU. + */ + if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) && + apple_gmux_present() && pdev != vga_default_device() && + !vga_switcheroo_handler_flags()) + return -EPROBE_DEFER; + return drm_get_pci_dev(pdev, ent, &driver); } @@ -1079,7 +1091,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv) */ broxton_init_cdclk(dev); broxton_ddi_phy_init(dev); - intel_prepare_ddi(dev); return 0; } @@ -1338,8 +1349,8 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, return 0; DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", - wait_for_on ? "on" : "off", - I915_READ(VLV_GTLC_PW_STATUS)); + onoff(wait_for_on), + I915_READ(VLV_GTLC_PW_STATUS)); /* * RC6 transitioning can be delayed up to 2 msec (see @@ -1348,7 +1359,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, err = wait_for(COND, 3); if (err) DRM_ERROR("timeout waiting for GT wells to go %s\n", - wait_for_on ? 
"on" : "off"); + onoff(wait_for_on)); return err; #undef COND @@ -1359,7 +1370,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) return; - DRM_ERROR("GT register access while GT waking disabled\n"); + DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); } @@ -1503,6 +1514,10 @@ static int intel_runtime_suspend(struct device *device) enable_rpm_wakeref_asserts(dev_priv); WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); + + if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) + DRM_ERROR("Unclaimed access detected prior to suspending\n"); + dev_priv->pm.suspended = true; /* @@ -1551,6 +1566,8 @@ static int intel_runtime_resume(struct device *device) intel_opregion_notify_adapter(dev, PCI_D0); dev_priv->pm.suspended = false; + if (intel_uncore_unclaimed_mmio(dev_priv)) + DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); intel_guc_resume(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f0f75d7c0d94..64cfd446453c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -34,6 +34,7 @@ #include <uapi/drm/drm_fourcc.h> #include <drm/drmP.h> +#include "i915_params.h" #include "i915_reg.h" #include "intel_bios.h" #include "intel_ringbuffer.h" @@ -58,7 +59,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20151218" +#define DRIVER_DATE "20160214" #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -69,11 +70,11 @@ BUILD_BUG_ON(__i915_warn_cond); \ WARN(__i915_warn_cond, "WARN_ON(" #x ")"); }) #else -#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x ) +#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")") #endif #undef WARN_ON_ONCE -#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x ) +#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")") #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \ (long) (x), __func__); @@ -87,31 +88,25 @@ */ #define I915_STATE_WARN(condition, format...) ({ \ int __ret_warn_on = !!(condition); \ - if (unlikely(__ret_warn_on)) { \ - if (i915.verbose_state_checks) \ - WARN(1, format); \ - else \ + if (unlikely(__ret_warn_on)) \ + if (!WARN(i915.verbose_state_checks, format)) \ DRM_ERROR(format); \ - } \ unlikely(__ret_warn_on); \ }) -#define I915_STATE_WARN_ON(condition) ({ \ - int __ret_warn_on = !!(condition); \ - if (unlikely(__ret_warn_on)) { \ - if (i915.verbose_state_checks) \ - WARN(1, "WARN_ON(" #condition ")\n"); \ - else \ - DRM_ERROR("WARN_ON(" #condition ")\n"); \ - } \ - unlikely(__ret_warn_on); \ -}) +#define I915_STATE_WARN_ON(x) \ + I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")") static inline const char *yesno(bool v) { return v ? "yes" : "no"; } +static inline const char *onoff(bool v) +{ + return v ? 
"on" : "off"; +} + enum pipe { INVALID_PIPE = -1, PIPE_A = 0, @@ -339,7 +334,7 @@ struct drm_i915_file_private { unsigned boosts; } rps; - struct intel_engine_cs *bsd_ring; + unsigned int bsd_ring; }; enum intel_dpll_id { @@ -633,6 +628,7 @@ struct drm_i915_display_funcs { struct dpll *best_clock); int (*compute_pipe_wm)(struct intel_crtc *crtc, struct drm_atomic_state *state); + void (*program_watermarks)(struct intel_crtc_state *cstate); void (*update_wm)(struct drm_crtc *crtc); int (*modeset_calc_cdclk)(struct drm_atomic_state *state); void (*modeset_commit_cdclk)(struct drm_atomic_state *state); @@ -657,9 +653,6 @@ struct drm_i915_display_funcs { struct drm_i915_gem_object *obj, struct drm_i915_gem_request *req, uint32_t flags); - void (*update_primary_plane)(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y); void (*hpd_irq_setup)(struct drm_device *dev); /* clock updates for mode set */ /* cursor updates */ @@ -726,6 +719,8 @@ struct intel_uncore { i915_reg_t reg_post; u32 val_reset; } fw_domain[FW_DOMAIN_ID_COUNT]; + + int unclaimed_mmio_check; }; /* Iterate over initialised fw domains */ @@ -889,6 +884,9 @@ struct intel_context { struct drm_i915_gem_object *state; struct intel_ringbuffer *ringbuf; int pin_count; + struct i915_vma *lrc_vma; + u64 lrc_desc; + uint32_t *lrc_reg_state; } engine[I915_NUM_RINGS]; struct list_head link; @@ -902,16 +900,15 @@ enum fb_op_origin { ORIGIN_DIRTYFB, }; -struct i915_fbc { +struct intel_fbc { /* This is always the inner lock when overlapping with struct_mutex and * it's the outer lock when overlapping with stolen_lock. */ struct mutex lock; unsigned threshold; - unsigned int fb_id; unsigned int possible_framebuffer_bits; unsigned int busy_bits; + unsigned int visible_pipes_mask; struct intel_crtc *crtc; - int y; struct drm_mm_node compressed_fb; struct drm_mm_node *compressed_llb; @@ -921,18 +918,52 @@ struct i915_fbc { bool enabled; bool active; + struct intel_fbc_state_cache { + struct { + unsigned int mode_flags; + uint32_t hsw_bdw_pixel_rate; + } crtc; + + struct { + unsigned int rotation; + int src_w; + int src_h; + bool visible; + } plane; + + struct { + u64 ilk_ggtt_offset; + uint32_t pixel_format; + unsigned int stride; + int fence_reg; + unsigned int tiling_mode; + } fb; + } state_cache; + + struct intel_fbc_reg_params { + struct { + enum pipe pipe; + enum plane plane; + unsigned int fence_y_offset; + } crtc; + + struct { + u64 ggtt_offset; + uint32_t pixel_format; + unsigned int stride; + int fence_reg; + } fb; + + int cfb_size; + } params; + struct intel_fbc_work { bool scheduled; + u32 scheduled_vblank; struct work_struct work; - struct drm_framebuffer *fb; - unsigned long enable_jiffies; } work; const char *no_fbc_reason; - - bool (*is_active)(struct drm_i915_private *dev_priv); - void (*activate)(struct intel_crtc *crtc); - void (*deactivate)(struct drm_i915_private *dev_priv); }; /** @@ -972,6 +1003,7 @@ struct i915_psr { unsigned busy_frontbuffer_bits; bool psr2_support; bool aux_frame_sync; + bool link_standby; }; enum intel_pch { @@ -1301,7 +1333,7 @@ struct i915_gem_mm { bool busy; /* the indicator for dispatch video commands on two BSD rings */ - int bsd_ring_dispatch_index; + unsigned int bsd_ring_dispatch_index; /** Bit 6 swizzling required for X tiling */ uint32_t bit_6_swizzle_x; @@ -1487,7 +1519,7 @@ struct intel_vbt_data { u8 seq_version; u32 size; u8 *data; - u8 *sequence[MIPI_SEQ_MAX]; + const u8 *sequence[MIPI_SEQ_MAX]; } dsi; int crt_ddc_pin; @@ -1659,11 +1691,18 @@ struct i915_wa_reg { u32 mask; }; 
-#define I915_MAX_WA_REGS 16 +/* + * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only + * allowing it for RCS as we don't foresee any requirement of having + * a whitelist for other engines. When it is really required for + * other engines then the limit need to be increased. + */ +#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS) struct i915_workarounds { struct i915_wa_reg reg[I915_MAX_WA_REGS]; u32 count; + u32 hw_whitelist_count[I915_NUM_RINGS]; }; struct i915_virtual_gpu { @@ -1760,7 +1799,7 @@ struct drm_i915_private { u32 pipestat_irq_mask[I915_MAX_PIPES]; struct i915_hotplug hotplug; - struct i915_fbc fbc; + struct intel_fbc fbc; struct i915_drrs drrs; struct intel_opregion opregion; struct intel_vbt_data vbt; @@ -1784,7 +1823,7 @@ struct drm_i915_private { unsigned int fsb_freq, mem_freq, is_ddr3; unsigned int skl_boot_cdclk; - unsigned int cdclk_freq, max_cdclk_freq; + unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; unsigned int max_dotclk_freq; unsigned int hpll_freq; unsigned int czclk_freq; @@ -1829,8 +1868,13 @@ struct drm_i915_private { struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; #endif + /* dpll and cdclk state is protected by connection_mutex */ int num_shared_dpll; struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; + + unsigned int active_crtcs; + unsigned int min_pixclk[I915_MAX_PIPES]; + int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; struct i915_workarounds workarounds; @@ -1945,6 +1989,8 @@ struct drm_i915_private { void (*stop_ring)(struct intel_engine_cs *ring); } gt; + struct intel_context *kernel_context; + bool edp_low_vswing; /* perform PHY state sanity checks? */ @@ -1988,6 +2034,9 @@ enum hdmi_force_audio { #define I915_GTT_OFFSET_NONE ((u32)-1) struct drm_i915_gem_object_ops { + unsigned int flags; +#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1 + /* Interface between the GEM object and its backing storage. * get_pages() is called once prior to the use of the associated set * of pages before to binding them into the GTT, and put_pages() is @@ -2003,6 +2052,7 @@ struct drm_i915_gem_object_ops { */ int (*get_pages)(struct drm_i915_gem_object *); void (*put_pages)(struct drm_i915_gem_object *); + int (*dmabuf_export)(struct drm_i915_gem_object *); void (*release)(struct drm_i915_gem_object *); }; @@ -2265,9 +2315,9 @@ struct drm_i915_gem_request { }; -int i915_gem_request_alloc(struct intel_engine_cs *ring, - struct intel_context *ctx, - struct drm_i915_gem_request **req_out); +struct drm_i915_gem_request * __must_check +i915_gem_request_alloc(struct intel_engine_cs *engine, + struct intel_context *ctx); void i915_gem_request_cancel(struct drm_i915_gem_request *req); void i915_gem_request_free(struct kref *req_ref); int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, @@ -2576,6 +2626,11 @@ struct drm_i915_cmd_table { /* Early gen2 have a totally busted CS tlb and require pinned batches. */ #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) + +/* WaRsDisableCoarsePowerGating:skl,bxt */ +#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ + ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \ + IS_SKL_REVID(dev, 0, SKL_REVID_F0))) /* * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts * even when in MSI mode. 
This results in spurious interrupt warnings if the @@ -2665,44 +2720,7 @@ extern int i915_max_ioctl; extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); extern int i915_resume_switcheroo(struct drm_device *dev); -/* i915_params.c */ -struct i915_params { - int modeset; - int panel_ignore_lid; - int semaphores; - int lvds_channel_mode; - int panel_use_ssc; - int vbt_sdvo_panel_type; - int enable_rc6; - int enable_dc; - int enable_fbc; - int enable_ppgtt; - int enable_execlists; - int enable_psr; - unsigned int preliminary_hw_support; - int disable_power_well; - int enable_ips; - int invert_brightness; - int enable_cmd_parser; - /* leave bools at the end to not create holes */ - bool enable_hangcheck; - bool fastboot; - bool prefault_disable; - bool load_detect_test; - bool reset; - bool disable_display; - bool disable_vtd_wa; - bool enable_guc_submission; - int guc_log_level; - int use_mmio_flip; - int mmio_debug; - bool verbose_state_checks; - bool nuclear_pageflip; - int edp_vswing; -}; -extern struct i915_params i915 __read_mostly; - - /* i915_dma.c */ +/* i915_dma.c */ extern int i915_driver_load(struct drm_device *, unsigned long flags); extern int i915_driver_unload(struct drm_device *); extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); @@ -2745,7 +2763,8 @@ extern void intel_uncore_sanitize(struct drm_device *dev); extern void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake); extern void intel_uncore_init(struct drm_device *dev); -extern void intel_uncore_check_errors(struct drm_device *dev); +extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); +extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); extern void intel_uncore_fini(struct drm_device *dev); extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); @@ -2867,7 +2886,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -void i915_gem_load(struct drm_device *dev); +void i915_gem_load_init(struct drm_device *dev); +void i915_gem_load_cleanup(struct drm_device *dev); void *i915_gem_object_alloc(struct drm_device *dev); void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, @@ -3038,7 +3058,7 @@ int i915_gem_init_rings(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); void i915_gem_init_swizzling(struct drm_device *dev); -void i915_gem_cleanup_ringbuffer(struct drm_device *dev); +void i915_gem_cleanup_engines(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gem_suspend(struct drm_device *dev); void __i915_add_request(struct drm_i915_gem_request *req, @@ -3280,6 +3300,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, #define I915_SHRINK_ACTIVE 0x8 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); +void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); /* i915_gem_tiling.c */ @@ -3450,16 +3471,14 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val u32 vlv_punit_read(struct drm_i915_private 
*dev_priv, u32 addr); void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); -u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg); -void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); +u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg); +void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val); u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg); void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); -u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg); -void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg); void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val); u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ddc21d4b388d..de57e7f0be0f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1251,7 +1251,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; DEFINE_WAIT(wait); unsigned long timeout_expire; - s64 before, now; + s64 before = 0; /* Only to silence a compiler warning. */ int ret; WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled"); @@ -1271,14 +1271,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req, return -ETIME; timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout); + + /* + * Record current time in case interrupted by signal, or wedged. + */ + before = ktime_get_raw_ns(); } if (INTEL_INFO(dev_priv)->gen >= 6) gen6_rps_boost(dev_priv, rps, req->emitted_jiffies); - /* Record current time in case interrupted by signal, or wedged */ trace_i915_gem_request_wait_begin(req); - before = ktime_get_raw_ns(); /* Optimistic spin for the next jiffie before touching IRQs */ ret = __i915_spin_request(req, state); @@ -1343,11 +1346,10 @@ int __i915_wait_request(struct drm_i915_gem_request *req, finish_wait(&ring->irq_queue, &wait); out: - now = ktime_get_raw_ns(); trace_i915_gem_request_wait_end(req); if (timeout) { - s64 tres = *timeout - (now - before); + s64 tres = *timeout - (ktime_get_raw_ns() - before); *timeout = tres < 0 ? 
0 : tres; @@ -2677,10 +2679,8 @@ void i915_gem_request_free(struct kref *req_ref) i915_gem_request_remove_from_client(req); if (ctx) { - if (i915.enable_execlists) { - if (ctx != req->ring->default_context) - intel_lr_context_unpin(req); - } + if (i915.enable_execlists && ctx != req->i915->kernel_context) + intel_lr_context_unpin(ctx, req->ring); i915_gem_context_unreference(ctx); } @@ -2688,9 +2688,10 @@ void i915_gem_request_free(struct kref *req_ref) kmem_cache_free(req->i915->requests, req); } -int i915_gem_request_alloc(struct intel_engine_cs *ring, - struct intel_context *ctx, - struct drm_i915_gem_request **req_out) +static inline int +__i915_gem_request_alloc(struct intel_engine_cs *ring, + struct intel_context *ctx, + struct drm_i915_gem_request **req_out) { struct drm_i915_private *dev_priv = to_i915(ring->dev); struct drm_i915_gem_request *req; @@ -2753,6 +2754,31 @@ err: return ret; } +/** + * i915_gem_request_alloc - allocate a request structure + * + * @engine: engine that we wish to issue the request on. + * @ctx: context that the request will be associated with. + * This can be NULL if the request is not directly related to + * any specific user context, in which case this function will + * choose an appropriate context to use. + * + * Returns a pointer to the allocated request if successful, + * or an error code if not. + */ +struct drm_i915_gem_request * +i915_gem_request_alloc(struct intel_engine_cs *engine, + struct intel_context *ctx) +{ + struct drm_i915_gem_request *req; + int err; + + if (ctx == NULL) + ctx = to_i915(engine->dev)->kernel_context; + err = __i915_gem_request_alloc(engine, ctx, &req); + return err ? ERR_PTR(err) : req; +} + void i915_gem_request_cancel(struct drm_i915_gem_request *req) { intel_ring_reserved_space_cancel(req->ringbuf); @@ -3170,9 +3196,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, return 0; if (*to_req == NULL) { - ret = i915_gem_request_alloc(to, to->default_context, to_req); - if (ret) - return ret; + struct drm_i915_gem_request *req; + + req = i915_gem_request_alloc(to, NULL); + if (IS_ERR(req)) + return PTR_ERR(req); + + *to_req = req; } trace_i915_gem_ring_sync_to(*to_req, from, from_req); @@ -3372,9 +3402,9 @@ int i915_gpu_idle(struct drm_device *dev) if (!i915.enable_execlists) { struct drm_i915_gem_request *req; - ret = i915_gem_request_alloc(ring, ring->default_context, &req); - if (ret) - return ret; + req = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(req)) + return PTR_ERR(req); ret = i915_switch_context(req); if (ret) { @@ -4328,10 +4358,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, if (ret) goto unref; - BUILD_BUG_ON(I915_NUM_RINGS > 16); - args->busy = obj->active << 16; - if (obj->last_write_req) - args->busy |= obj->last_write_req->ring->id; + args->busy = 0; + if (obj->active) { + int i; + + for (i = 0; i < I915_NUM_RINGS; i++) { + struct drm_i915_gem_request *req; + + req = obj->last_read_req[i]; + if (req) + args->busy |= 1 << (16 + req->ring->exec_id); + } + if (obj->last_write_req) + args->busy |= obj->last_write_req->ring->exec_id; + } unref: drm_gem_object_unreference(&obj->base); @@ -4425,6 +4465,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, } static const struct drm_i915_gem_object_ops i915_gem_object_ops = { + .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE, .get_pages = i915_gem_object_get_pages_gtt, .put_pages = i915_gem_object_put_pages_gtt, }; @@ -4832,7 +4873,7 @@ i915_gem_init_hw(struct drm_device *dev) */ init_unused_rings(dev); - 
BUG_ON(!dev_priv->ring[RCS].default_context); + BUG_ON(!dev_priv->kernel_context); ret = i915_ppgtt_init_hw(dev); if (ret) { @@ -4869,11 +4910,10 @@ i915_gem_init_hw(struct drm_device *dev) for_each_ring(ring, dev_priv, i) { struct drm_i915_gem_request *req; - WARN_ON(!ring->default_context); - - ret = i915_gem_request_alloc(ring, ring->default_context, &req); - if (ret) { - i915_gem_cleanup_ringbuffer(dev); + req = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + i915_gem_cleanup_engines(dev); goto out; } @@ -4886,7 +4926,7 @@ i915_gem_init_hw(struct drm_device *dev) if (ret && ret != -EIO) { DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret); i915_gem_request_cancel(req); - i915_gem_cleanup_ringbuffer(dev); + i915_gem_cleanup_engines(dev); goto out; } @@ -4894,7 +4934,7 @@ i915_gem_init_hw(struct drm_device *dev) if (ret && ret != -EIO) { DRM_ERROR("Context enable ring #%d failed %d\n", i, ret); i915_gem_request_cancel(req); - i915_gem_cleanup_ringbuffer(dev); + i915_gem_cleanup_engines(dev); goto out; } @@ -4969,7 +5009,7 @@ out_unlock: } void -i915_gem_cleanup_ringbuffer(struct drm_device *dev) +i915_gem_cleanup_engines(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *ring; @@ -4978,13 +5018,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) for_each_ring(ring, dev_priv, i) dev_priv->gt.cleanup_ring(ring); - if (i915.enable_execlists) - /* - * Neither the BIOS, ourselves or any other kernel - * expects the system to be in execlists mode on startup, - * so we need to reset the GPU back to legacy mode. - */ - intel_gpu_reset(dev); + if (i915.enable_execlists) { + /* + * Neither the BIOS, ourselves or any other kernel + * expects the system to be in execlists mode on startup, + * so we need to reset the GPU back to legacy mode. 
+ */ + intel_gpu_reset(dev); + } } static void @@ -4995,7 +5036,7 @@ init_ring_lists(struct intel_engine_cs *ring) } void -i915_gem_load(struct drm_device *dev) +i915_gem_load_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; @@ -5061,11 +5102,18 @@ i915_gem_load(struct drm_device *dev) dev_priv->mm.interruptible = true; - i915_gem_shrinker_init(dev_priv); - mutex_init(&dev_priv->fb_tracking.lock); } +void i915_gem_load_cleanup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + kmem_cache_destroy(dev_priv->requests); + kmem_cache_destroy(dev_priv->vmas); + kmem_cache_destroy(dev_priv->objects); +} + void i915_gem_release(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; @@ -5112,6 +5160,8 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); + file_priv->bsd_ring = -1; + ret = i915_gem_context_open(dev, file); if (ret) kfree(file_priv); @@ -5261,7 +5311,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n) struct page *page; /* Only default objects have per-page dirty tracking */ - if (WARN_ON(obj->ops != &i915_gem_object_ops)) + if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0)) return NULL; page = i915_gem_object_get_page(obj, n); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index c25083c78ba7..83a097c94911 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -321,6 +321,18 @@ err_destroy: return ERR_PTR(ret); } +static void i915_gem_context_unpin(struct intel_context *ctx, + struct intel_engine_cs *engine) +{ + if (i915.enable_execlists) { + intel_lr_context_unpin(ctx, engine); + } else { + if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state) + i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state); + i915_gem_context_unreference(ctx); + } +} + void i915_gem_context_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -329,40 +341,31 @@ void i915_gem_context_reset(struct drm_device *dev) if (i915.enable_execlists) { struct intel_context *ctx; - list_for_each_entry(ctx, &dev_priv->context_list, link) { + list_for_each_entry(ctx, &dev_priv->context_list, link) intel_lr_context_reset(dev, ctx); - } - - return; } for (i = 0; i < I915_NUM_RINGS; i++) { struct intel_engine_cs *ring = &dev_priv->ring[i]; - struct intel_context *lctx = ring->last_context; - - if (lctx) { - if (lctx->legacy_hw_ctx.rcs_state && i == RCS) - i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state); - i915_gem_context_unreference(lctx); + if (ring->last_context) { + i915_gem_context_unpin(ring->last_context, ring); ring->last_context = NULL; } - - /* Force the GPU state to be reinitialised on enabling */ - if (ring->default_context) - ring->default_context->legacy_hw_ctx.initialized = false; } + + /* Force the GPU state to be reinitialised on enabling */ + dev_priv->kernel_context->legacy_hw_ctx.initialized = false; } int i915_gem_context_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_context *ctx; - int i; /* Init should only be called once per module load. Eventually the * restriction on the context_disabled check can be loosened. 
*/ - if (WARN_ON(dev_priv->ring[RCS].default_context)) + if (WARN_ON(dev_priv->kernel_context)) return 0; if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) { @@ -392,12 +395,7 @@ int i915_gem_context_init(struct drm_device *dev) return PTR_ERR(ctx); } - for (i = 0; i < I915_NUM_RINGS; i++) { - struct intel_engine_cs *ring = &dev_priv->ring[i]; - - /* NB: RCS will hold a ref for all rings */ - ring->default_context = ctx; - } + dev_priv->kernel_context = ctx; DRM_DEBUG_DRIVER("%s context support initialized\n", i915.enable_execlists ? "LR" : @@ -408,7 +406,7 @@ int i915_gem_context_init(struct drm_device *dev) void i915_gem_context_fini(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_context *dctx = dev_priv->ring[RCS].default_context; + struct intel_context *dctx = dev_priv->kernel_context; int i; if (dctx->legacy_hw_ctx.rcs_state) { @@ -424,28 +422,21 @@ void i915_gem_context_fini(struct drm_device *dev) * to offset the do_switch part, so that i915_gem_context_unreference() * can then free the base object correctly. */ WARN_ON(!dev_priv->ring[RCS].last_context); - if (dev_priv->ring[RCS].last_context == dctx) { - /* Fake switch to NULL context */ - WARN_ON(dctx->legacy_hw_ctx.rcs_state->active); - i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); - i915_gem_context_unreference(dctx); - dev_priv->ring[RCS].last_context = NULL; - } i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); } - for (i = 0; i < I915_NUM_RINGS; i++) { + for (i = I915_NUM_RINGS; --i >= 0;) { struct intel_engine_cs *ring = &dev_priv->ring[i]; - if (ring->last_context) - i915_gem_context_unreference(ring->last_context); - - ring->default_context = NULL; - ring->last_context = NULL; + if (ring->last_context) { + i915_gem_context_unpin(ring->last_context, ring); + ring->last_context = NULL; + } } i915_gem_context_unreference(dctx); + dev_priv->kernel_context = NULL; } int i915_gem_context_enable(struct drm_i915_gem_request *req) diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index e9c2bfd85b52..1f3eef6fb345 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -193,10 +193,26 @@ static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_n static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) { - return -EINVAL; + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); + int ret; + + if (obj->base.size < vma->vm_end - vma->vm_start) + return -EINVAL; + + if (!obj->base.filp) + return -ENODEV; + + ret = obj->base.filp->f_op->mmap(obj->base.filp, vma); + if (ret) + return ret; + + fput(vma->vm_file); + vma->vm_file = get_file(obj->base.filp); + + return 0; } -static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction) +static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_device *dev = obj->base.dev; @@ -212,6 +228,27 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size return ret; } +static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) +{ + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + bool was_interruptible; + int ret; + + 
mutex_lock(&dev->struct_mutex); + was_interruptible = dev_priv->mm.interruptible; + dev_priv->mm.interruptible = false; + + ret = i915_gem_object_set_to_gtt_domain(obj, false); + + dev_priv->mm.interruptible = was_interruptible; + mutex_unlock(&dev->struct_mutex); + + if (unlikely(ret)) + DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n"); +} + static const struct dma_buf_ops i915_dmabuf_ops = { .map_dma_buf = i915_gem_map_dma_buf, .unmap_dma_buf = i915_gem_unmap_dma_buf, @@ -224,6 +261,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = { .vmap = i915_gem_dmabuf_vmap, .vunmap = i915_gem_dmabuf_vunmap, .begin_cpu_access = i915_gem_begin_cpu_access, + .end_cpu_access = i915_gem_end_cpu_access, }; struct dma_buf *i915_gem_prime_export(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index dccb517361b3..8fd00d279447 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -193,13 +193,10 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle) return eb->lut[handle]; } else { struct hlist_head *head; - struct hlist_node *node; + struct i915_vma *vma; head = &eb->buckets[handle & eb->and]; - hlist_for_each(node, head) { - struct i915_vma *vma; - - vma = hlist_entry(node, struct i915_vma, exec_node); + hlist_for_each_entry(vma, head, exec_node) { if (vma->exec_handle == handle) return vma; } @@ -1309,6 +1306,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, exec_start = params->batch_obj_vm_offset + params->args_batch_start_offset; + if (exec_len == 0) + exec_len = params->batch_obj->base.size; + ret = ring->dispatch_execbuffer(params->request, exec_start, exec_len, params->dispatch_flags); @@ -1325,33 +1325,23 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, /** * Find one BSD ring to dispatch the corresponding BSD command. - * The Ring ID is returned. + * The ring index is returned. */ -static int gen8_dispatch_bsd_ring(struct drm_device *dev, - struct drm_file *file) +static unsigned int +gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_file_private *file_priv = file->driver_priv; - /* Check whether the file_priv is using one ring */ - if (file_priv->bsd_ring) - return file_priv->bsd_ring->id; - else { - /* If no, use the ping-pong mechanism to select one ring */ - int ring_id; - - mutex_lock(&dev->struct_mutex); - if (dev_priv->mm.bsd_ring_dispatch_index == 0) { - ring_id = VCS; - dev_priv->mm.bsd_ring_dispatch_index = 1; - } else { - ring_id = VCS2; - dev_priv->mm.bsd_ring_dispatch_index = 0; - } - file_priv->bsd_ring = &dev_priv->ring[ring_id]; - mutex_unlock(&dev->struct_mutex); - return ring_id; + /* Check whether the file_priv has already selected one ring. */ + if ((int)file_priv->bsd_ring < 0) { + /* If not, use the ping-pong mechanism to select one. 
*/ + mutex_lock(&dev_priv->dev->struct_mutex); + file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index; + dev_priv->mm.bsd_ring_dispatch_index ^= 1; + mutex_unlock(&dev_priv->dev->struct_mutex); } + + return file_priv->bsd_ring; } static struct drm_i915_gem_object * @@ -1374,6 +1364,64 @@ eb_get_batch(struct eb_vmas *eb) return vma->obj; } +#define I915_USER_RINGS (4) + +static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = { + [I915_EXEC_DEFAULT] = RCS, + [I915_EXEC_RENDER] = RCS, + [I915_EXEC_BLT] = BCS, + [I915_EXEC_BSD] = VCS, + [I915_EXEC_VEBOX] = VECS +}; + +static int +eb_select_ring(struct drm_i915_private *dev_priv, + struct drm_file *file, + struct drm_i915_gem_execbuffer2 *args, + struct intel_engine_cs **ring) +{ + unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK; + + if (user_ring_id > I915_USER_RINGS) { + DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id); + return -EINVAL; + } + + if ((user_ring_id != I915_EXEC_BSD) && + ((args->flags & I915_EXEC_BSD_MASK) != 0)) { + DRM_DEBUG("execbuf with non bsd ring but with invalid " + "bsd dispatch flags: %d\n", (int)(args->flags)); + return -EINVAL; + } + + if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) { + unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; + + if (bsd_idx == I915_EXEC_BSD_DEFAULT) { + bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file); + } else if (bsd_idx >= I915_EXEC_BSD_RING1 && + bsd_idx <= I915_EXEC_BSD_RING2) { + bsd_idx >>= I915_EXEC_BSD_SHIFT; + bsd_idx--; + } else { + DRM_DEBUG("execbuf with unknown bsd ring: %u\n", + bsd_idx); + return -EINVAL; + } + + *ring = &dev_priv->ring[_VCS(bsd_idx)]; + } else { + *ring = &dev_priv->ring[user_ring_map[user_ring_id]]; + } + + if (!intel_ring_initialized(*ring)) { + DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id); + return -EINVAL; + } + + return 0; +} + static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file, @@ -1381,6 +1429,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_exec_object2 *exec) { struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_request *req = NULL; struct eb_vmas *eb; struct drm_i915_gem_object *batch_obj; struct drm_i915_gem_exec_object2 shadow_exec_entry; @@ -1411,51 +1460,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (args->flags & I915_EXEC_IS_PINNED) dispatch_flags |= I915_DISPATCH_PINNED; - if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) { - DRM_DEBUG("execbuf with unknown ring: %d\n", - (int)(args->flags & I915_EXEC_RING_MASK)); - return -EINVAL; - } - - if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) && - ((args->flags & I915_EXEC_BSD_MASK) != 0)) { - DRM_DEBUG("execbuf with non bsd ring but with invalid " - "bsd dispatch flags: %d\n", (int)(args->flags)); - return -EINVAL; - } - - if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT) - ring = &dev_priv->ring[RCS]; - else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) { - if (HAS_BSD2(dev)) { - int ring_id; - - switch (args->flags & I915_EXEC_BSD_MASK) { - case I915_EXEC_BSD_DEFAULT: - ring_id = gen8_dispatch_bsd_ring(dev, file); - ring = &dev_priv->ring[ring_id]; - break; - case I915_EXEC_BSD_RING1: - ring = &dev_priv->ring[VCS]; - break; - case I915_EXEC_BSD_RING2: - ring = &dev_priv->ring[VCS2]; - break; - default: - DRM_DEBUG("execbuf with unknown bsd ring: %d\n", - (int)(args->flags & I915_EXEC_BSD_MASK)); - return -EINVAL; - } - } else - ring = &dev_priv->ring[VCS]; - 
} else - ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1]; - - if (!intel_ring_initialized(ring)) { - DRM_DEBUG("execbuf with invalid ring: %d\n", - (int)(args->flags & I915_EXEC_RING_MASK)); - return -EINVAL; - } + ret = eb_select_ring(dev_priv, file, args, &ring); + if (ret) + return ret; if (args->buffer_count < 1) { DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); @@ -1602,11 +1609,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm); /* Allocate a request for this batch buffer nice and early. */ - ret = i915_gem_request_alloc(ring, ctx, ¶ms->request); - if (ret) + req = i915_gem_request_alloc(ring, ctx); + if (IS_ERR(req)) { + ret = PTR_ERR(req); goto err_batch_unpin; + } - ret = i915_gem_request_add_to_client(params->request, file); + ret = i915_gem_request_add_to_client(req, file); if (ret) goto err_batch_unpin; @@ -1622,6 +1631,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, params->dispatch_flags = dispatch_flags; params->batch_obj = batch_obj; params->ctx = ctx; + params->request = req; ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas); @@ -1645,8 +1655,8 @@ err: * must be freed again. If it was submitted then it is being tracked * on the active request list and no clean up is required here. */ - if (ret && params->request) - i915_gem_request_cancel(params->request); + if (ret && !IS_ERR_OR_NULL(req)) + i915_gem_request_cancel(req); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 56f4f2e58d53..9127f8f3561c 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -96,9 +96,11 @@ static int i915_get_ggtt_vma_pages(struct i915_vma *vma); -const struct i915_ggtt_view i915_ggtt_view_normal; +const struct i915_ggtt_view i915_ggtt_view_normal = { + .type = I915_GGTT_VIEW_NORMAL, +}; const struct i915_ggtt_view i915_ggtt_view_rotated = { - .type = I915_GGTT_VIEW_ROTATED + .type = I915_GGTT_VIEW_ROTATED, }; static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) @@ -2130,6 +2132,25 @@ static void i915_address_space_init(struct i915_address_space *vm, list_add_tail(&vm->global_link, &dev_priv->vm_list); } +static void gtt_write_workarounds(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* This function is for gtt related workarounds. This function is + * called on driver load and after a GPU reset, so you can place + * workarounds here even if they get overwritten by GPU reset. + */ + /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */ + if (IS_BROADWELL(dev)) + I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); + else if (IS_CHERRYVIEW(dev)) + I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); + else if (IS_SKYLAKE(dev)) + I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); + else if (IS_BROXTON(dev)) + I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); +} + int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2146,6 +2167,8 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) int i915_ppgtt_init_hw(struct drm_device *dev) { + gtt_write_workarounds(dev); + /* In the case of execlists, PPGTT is enabled by the context descriptor * and the PDPs are contained within the context itself. 
We don't * need to do anything here. */ @@ -2807,6 +2830,8 @@ void i915_global_gtt_cleanup(struct drm_device *dev) ppgtt->base.cleanup(&ppgtt->base); } + i915_gem_cleanup_stolen(dev); + if (drm_mm_initialized(&vm->mm)) { if (intel_vgpu_active(dev)) intel_vgt_deballoon(); @@ -3179,6 +3204,14 @@ int i915_gem_gtt_init(struct drm_device *dev) if (ret) return ret; + /* + * Initialise stolen early so that we may reserve preallocated + * objects for the BIOS to KMS transition. + */ + ret = i915_gem_init_stolen(dev); + if (ret) + goto out_gtt_cleanup; + /* GMADR is the PCI mmio aperture into the global GTT. */ DRM_INFO("Memory usable by graphics device = %lluM\n", gtt->base.total >> 20); @@ -3198,6 +3231,11 @@ int i915_gem_gtt_init(struct drm_device *dev) DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); return 0; + +out_gtt_cleanup: + gtt->base.cleanup(&dev_priv->gtt.base); + + return ret; } void i915_gem_restore_gtt_mappings(struct drm_device *dev) @@ -3329,8 +3367,9 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, } static struct scatterlist * -rotate_pages(dma_addr_t *in, unsigned int offset, +rotate_pages(const dma_addr_t *in, unsigned int offset, unsigned int width, unsigned int height, + unsigned int stride, struct sg_table *st, struct scatterlist *sg) { unsigned int column, row; @@ -3342,7 +3381,7 @@ rotate_pages(dma_addr_t *in, unsigned int offset, } for (column = 0; column < width; column++) { - src_idx = width * (height - 1) + column; + src_idx = stride * (height - 1) + column; for (row = 0; row < height; row++) { st->nents++; /* We don't need the pages, but need to initialize @@ -3353,7 +3392,7 @@ rotate_pages(dma_addr_t *in, unsigned int offset, sg_dma_address(sg) = in[offset + src_idx]; sg_dma_len(sg) = PAGE_SIZE; sg = sg_next(sg); - src_idx -= width; + src_idx -= stride; } } @@ -3361,10 +3400,9 @@ rotate_pages(dma_addr_t *in, unsigned int offset, } static struct sg_table * -intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, +intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, struct drm_i915_gem_object *obj) { - struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info; unsigned int size_pages = rot_info->size >> PAGE_SHIFT; unsigned int size_pages_uv; struct sg_page_iter sg_iter; @@ -3406,6 +3444,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, /* Rotate the pages. */ sg = rotate_pages(page_addr_list, 0, rot_info->width_pages, rot_info->height_pages, + rot_info->width_pages, st, NULL); /* Append the UV plane if NV12. 
*/ @@ -3421,6 +3460,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, rotate_pages(page_addr_list, uv_start_page, rot_info->width_pages_uv, rot_info->height_pages_uv, + rot_info->width_pages_uv, st, sg); } @@ -3502,7 +3542,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) vma->ggtt_view.pages = vma->obj->pages; else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED) vma->ggtt_view.pages = - intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj); + intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj); else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL) vma->ggtt_view.pages = intel_partial_pages(&vma->ggtt_view, vma->obj); @@ -3596,7 +3636,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj, if (view->type == I915_GGTT_VIEW_NORMAL) { return obj->base.size; } else if (view->type == I915_GGTT_VIEW_ROTATED) { - return view->params.rotation_info.size; + return view->params.rotated.size; } else if (view->type == I915_GGTT_VIEW_PARTIAL) { return view->params.partial.size << PAGE_SHIFT; } else { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index b448ad832dcf..66a6da2396a2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -44,7 +44,6 @@ typedef uint64_t gen8_ppgtt_pml4e_t; #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) - /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) @@ -156,7 +155,7 @@ struct i915_ggtt_view { u64 offset; unsigned int size; } partial; - struct intel_rotation_info rotation_info; + struct intel_rotation_info rotated; } params; struct sg_table *pages; @@ -343,6 +342,8 @@ struct i915_gtt { size_t stolen_size; /* Total size of stolen memory */ size_t stolen_usable_size; /* Total size minus BIOS reserved */ + size_t stolen_reserved_base; + size_t stolen_reserved_size; u64 mappable_end; /* End offset that we can CPU map */ struct io_mapping *mappable; /* Mapping to our CPU mappable region */ phys_addr_t mappable_base; /* PA of our GMADR */ diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index f7df54a8ee2b..58c1e592bbdb 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -47,6 +47,46 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) #endif } +static int num_vma_bound(struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma; + int count = 0; + + list_for_each_entry(vma, &obj->vma_list, vma_link) { + if (drm_mm_node_allocated(&vma->node)) + count++; + if (vma->pin_count) + count++; + } + + return count; +} + +static bool swap_available(void) +{ + return get_nr_swap_pages() > 0; +} + +static bool can_release_pages(struct drm_i915_gem_object *obj) +{ + /* Only report true if by unbinding the object and putting its pages + * we can actually make forward progress towards freeing physical + * pages. + * + * If the pages are pinned for any other reason than being bound + * to the GPU, simply unbinding from the GPU is not going to succeed + * in releasing our pin count on the pages themselves. + */ + if (obj->pages_pin_count != num_vma_bound(obj)) + return false; + + /* We can only return physical pages to the system if we can either + * discard the contents (because the user has marked them as being + * purgeable) or if we can move their contents out to swap. 
+ */ + return swap_available() || obj->madv == I915_MADV_DONTNEED; +} + /** * i915_gem_shrink - Shrink buffer object caches * @dev_priv: i915 device @@ -129,6 +169,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active) continue; + if (!can_release_pages(obj)) + continue; + drm_gem_object_reference(&obj->base); /* For the unbound phase, this should be a no-op! */ @@ -188,21 +231,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) return true; } -static int num_vma_bound(struct drm_i915_gem_object *obj) -{ - struct i915_vma *vma; - int count = 0; - - list_for_each_entry(vma, &obj->vma_list, vma_link) { - if (drm_mm_node_allocated(&vma->node)) - count++; - if (vma->pin_count) - count++; - } - - return count; -} - static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) { @@ -222,7 +250,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) count += obj->base.size >> PAGE_SHIFT; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (!obj->active && obj->pages_pin_count == num_vma_bound(obj)) + if (!obj->active && can_release_pages(obj)) count += obj->base.size >> PAGE_SHIFT; } @@ -339,8 +367,20 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv) dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan; dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count; dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS; - register_shrinker(&dev_priv->mm.shrinker); + WARN_ON(register_shrinker(&dev_priv->mm.shrinker)); dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; - register_oom_notifier(&dev_priv->mm.oom_notifier); + WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier)); +} + +/** + * i915_gem_shrinker_cleanup - Clean up i915 shrinker + * @dev_priv: i915 device + * + * This function unregisters the i915 shrinker and OOM handler. + */ +void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv) +{ + WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); + unregister_shrinker(&dev_priv->mm.shrinker); } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 3476877fc0d6..ba1a00d815d3 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -458,6 +458,9 @@ int i915_gem_init_stolen(struct drm_device *dev) return 0; } + dev_priv->gtt.stolen_reserved_base = reserved_base; + dev_priv->gtt.stolen_reserved_size = reserved_size; + /* It is possible for the reserved area to end before the end of stolen * memory, so just consider the start. 
*/ reserved_total = stolen_top - reserved_base; @@ -569,6 +572,9 @@ _i915_gem_object_create_stolen(struct drm_device *dev, if (obj->pages == NULL) goto cleanup; + obj->get_page.sg = obj->pages->sgl; + obj->get_page.last = 0; + i915_gem_object_pin_pages(obj); obj->stolen = stolen; diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 19fb0bddc1cd..7107f2fd38f5 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -49,21 +49,18 @@ struct i915_mmu_notifier { struct hlist_node node; struct mmu_notifier mn; struct rb_root objects; - struct list_head linear; - bool has_linear; }; struct i915_mmu_object { struct i915_mmu_notifier *mn; + struct drm_i915_gem_object *obj; struct interval_tree_node it; struct list_head link; - struct drm_i915_gem_object *obj; struct work_struct work; - bool active; - bool is_linear; + bool attached; }; -static void __cancel_userptr__worker(struct work_struct *work) +static void cancel_userptr(struct work_struct *work) { struct i915_mmu_object *mo = container_of(work, typeof(*mo), work); struct drm_i915_gem_object *obj = mo->obj; @@ -94,24 +91,22 @@ static void __cancel_userptr__worker(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -static unsigned long cancel_userptr(struct i915_mmu_object *mo) +static void add_object(struct i915_mmu_object *mo) { - unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size; - - /* The mmu_object is released late when destroying the - * GEM object so it is entirely possible to gain a - * reference on an object in the process of being freed - * since our serialisation is via the spinlock and not - * the struct_mutex - and consequently use it after it - * is freed and then double free it. - */ - if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) { - schedule_work(&mo->work); - /* only schedule one work packet to avoid the refleak */ - mo->active = false; - } + if (mo->attached) + return; + + interval_tree_insert(&mo->it, &mo->mn->objects); + mo->attached = true; +} - return end; +static void del_object(struct i915_mmu_object *mo) +{ + if (!mo->attached) + return; + + interval_tree_remove(&mo->it, &mo->mn->objects); + mo->attached = false; } static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, @@ -122,28 +117,36 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn); struct i915_mmu_object *mo; + struct interval_tree_node *it; + LIST_HEAD(cancelled); + + if (RB_EMPTY_ROOT(&mn->objects)) + return; /* interval ranges are inclusive, but invalidate range is exclusive */ end--; spin_lock(&mn->lock); - if (mn->has_linear) { - list_for_each_entry(mo, &mn->linear, link) { - if (mo->it.last < start || mo->it.start > end) - continue; - - cancel_userptr(mo); - } - } else { - struct interval_tree_node *it; + it = interval_tree_iter_first(&mn->objects, start, end); + while (it) { + /* The mmu_object is released late when destroying the + * GEM object so it is entirely possible to gain a + * reference on an object in the process of being freed + * since our serialisation is via the spinlock and not + * the struct_mutex - and consequently use it after it + * is freed and then double free it. To prevent that + * use-after-free we only acquire a reference on the + * object if it is not in the process of being destroyed. 
+ */ + mo = container_of(it, struct i915_mmu_object, it); + if (kref_get_unless_zero(&mo->obj->base.refcount)) + schedule_work(&mo->work); - it = interval_tree_iter_first(&mn->objects, start, end); - while (it) { - mo = container_of(it, struct i915_mmu_object, it); - start = cancel_userptr(mo); - it = interval_tree_iter_next(it, start, end); - } + list_add(&mo->link, &cancelled); + it = interval_tree_iter_next(it, start, end); } + list_for_each_entry(mo, &cancelled, link) + del_object(mo); spin_unlock(&mn->lock); } @@ -164,8 +167,6 @@ i915_mmu_notifier_create(struct mm_struct *mm) spin_lock_init(&mn->lock); mn->mn.ops = &i915_gem_userptr_notifier; mn->objects = RB_ROOT; - INIT_LIST_HEAD(&mn->linear); - mn->has_linear = false; /* Protected by mmap_sem (write-lock) */ ret = __mmu_notifier_register(&mn->mn, mm); @@ -177,85 +178,6 @@ i915_mmu_notifier_create(struct mm_struct *mm) return mn; } -static int -i915_mmu_notifier_add(struct drm_device *dev, - struct i915_mmu_notifier *mn, - struct i915_mmu_object *mo) -{ - struct interval_tree_node *it; - int ret = 0; - - /* By this point we have already done a lot of expensive setup that - * we do not want to repeat just because the caller (e.g. X) has a - * signal pending (and partly because of that expensive setup, X - * using an interrupt timer is likely to get stuck in an EINTR loop). - */ - mutex_lock(&dev->struct_mutex); - - /* Make sure we drop the final active reference (and thereby - * remove the objects from the interval tree) before we do - * the check for overlapping objects. - */ - i915_gem_retire_requests(dev); - - spin_lock(&mn->lock); - it = interval_tree_iter_first(&mn->objects, - mo->it.start, mo->it.last); - if (it) { - struct drm_i915_gem_object *obj; - - /* We only need to check the first object in the range as it - * either has cancelled gup work queued and we need to - * return back to the user to give time for the gup-workers - * to flush their object references upon which the object will - * be removed from the interval-tree, or the the range is - * still in use by another client and the overlap is invalid. - * - * If we do have an overlap, we cannot use the interval tree - * for fast range invalidation. 
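The rewritten invalidate_range_start handler above works in two phases under the notifier spinlock: walk the interval tree, take a reference and schedule cancellation work for each overlapping range while collecting the matches on a local list, then detach the collected entries. The sketch below models only that collect-then-detach shape; the lookup structure is reduced to a plain linked list, the locking and work scheduling are left as comments, and struct tracked_range is a hypothetical stand-in rather than the kernel's interval tree node.

#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-in for one tracked userptr range. */
struct tracked_range {
	unsigned long start, last;     /* inclusive interval */
	bool attached;                 /* currently in the lookup structure */
	struct tracked_range *next;    /* all ranges (models the tree) */
	struct tracked_range *batch;   /* local "cancelled" list linkage */
};

/* Phase 1: under the lock, find overlaps, queue their cancel work (elided)
 * and remember them; phase 2: detach the whole batch. */
static void invalidate_range(struct tracked_range *all,
			     unsigned long start, unsigned long end)
{
	struct tracked_range *cancelled = NULL;

	end--;				/* exclusive -> inclusive */
	/* lock(); */
	for (struct tracked_range *r = all; r; r = r->next) {
		if (r->last < start || r->start > end)
			continue;
		/* schedule_cancel_work(r); -- elided in this sketch */
		r->batch = cancelled;
		cancelled = r;
	}
	for (struct tracked_range *r = cancelled; r; r = r->batch)
		r->attached = false;	/* models interval_tree_remove() */
	/* unlock(); */
}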
- */ - - obj = container_of(it, struct i915_mmu_object, it)->obj; - if (!obj->userptr.workers) - mn->has_linear = mo->is_linear = true; - else - ret = -EAGAIN; - } else - interval_tree_insert(&mo->it, &mn->objects); - - if (ret == 0) - list_add(&mo->link, &mn->linear); - - spin_unlock(&mn->lock); - mutex_unlock(&dev->struct_mutex); - - return ret; -} - -static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn) -{ - struct i915_mmu_object *mo; - - list_for_each_entry(mo, &mn->linear, link) - if (mo->is_linear) - return true; - - return false; -} - -static void -i915_mmu_notifier_del(struct i915_mmu_notifier *mn, - struct i915_mmu_object *mo) -{ - spin_lock(&mn->lock); - list_del(&mo->link); - if (mo->is_linear) - mn->has_linear = i915_mmu_notifier_has_linear(mn); - else - interval_tree_remove(&mo->it, &mn->objects); - spin_unlock(&mn->lock); -} - static void i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) { @@ -265,7 +187,9 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) if (mo == NULL) return; - i915_mmu_notifier_del(mo->mn, mo); + spin_lock(&mo->mn->lock); + del_object(mo); + spin_unlock(&mo->mn->lock); kfree(mo); obj->userptr.mmu_object = NULL; @@ -299,7 +223,6 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, { struct i915_mmu_notifier *mn; struct i915_mmu_object *mo; - int ret; if (flags & I915_USERPTR_UNSYNCHRONIZED) return capable(CAP_SYS_ADMIN) ? 0 : -EPERM; @@ -316,16 +239,10 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, return -ENOMEM; mo->mn = mn; - mo->it.start = obj->userptr.ptr; - mo->it.last = mo->it.start + obj->base.size - 1; mo->obj = obj; - INIT_WORK(&mo->work, __cancel_userptr__worker); - - ret = i915_mmu_notifier_add(obj->base.dev, mn, mo); - if (ret) { - kfree(mo); - return ret; - } + mo->it.start = obj->userptr.ptr; + mo->it.last = obj->userptr.ptr + obj->base.size - 1; + INIT_WORK(&mo->work, cancel_userptr); obj->userptr.mmu_object = mo; return 0; @@ -552,8 +469,10 @@ __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, /* In order to serialise get_pages with an outstanding * cancel_userptr, we must drop the struct_mutex and try again. 
*/ - if (!value || !work_pending(&obj->userptr.mmu_object->work)) - obj->userptr.mmu_object->active = value; + if (!value) + del_object(obj->userptr.mmu_object); + else if (!work_pending(&obj->userptr.mmu_object->work)) + add_object(obj->userptr.mmu_object); else ret = -EAGAIN; spin_unlock(&obj->userptr.mmu_object->mn->lock); @@ -789,9 +708,10 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) } static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { - .dmabuf_export = i915_gem_userptr_dmabuf_export, + .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE, .get_pages = i915_gem_userptr_get_pages, .put_pages = i915_gem_userptr_put_pages, + .dmabuf_export = i915_gem_userptr_dmabuf_export, .release = i915_gem_userptr_release, }; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 06ca4082735b..978c026963b8 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -365,6 +365,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "Reset count: %u\n", error->reset_count); err_printf(m, "Suspend count: %u\n", error->suspend_count); err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device); + err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision); + err_printf(m, "PCI Subsystem: %04x:%04x\n", + dev->pdev->subsystem_vendor, + dev->pdev->subsystem_device); err_printf(m, "IOMMU enabled?: %d\n", error->iommu); if (HAS_CSR(dev)) { @@ -1050,7 +1054,7 @@ static void i915_gem_record_rings(struct drm_device *dev, if (request) rbuf = request->ctx->engine[ring->id].ringbuf; else - rbuf = ring->default_context->engine[ring->id].ringbuf; + rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf; } else rbuf = ring->buffer; diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h index 685c7991e24f..e4ba5822289b 100644 --- a/drivers/gpu/drm/i915/i915_guc_reg.h +++ b/drivers/gpu/drm/i915/i915_guc_reg.h @@ -40,6 +40,7 @@ #define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT) #define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4) +#define SOFT_SCRATCH_COUNT 16 #define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4) #define UOS_RSA_SCRATCH_MAX_COUNT 64 diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 05aa7e61cbe0..d7543efc8a5e 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -158,10 +158,8 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; /* WaRsDisableCoarsePowerGating:skl,bxt */ - if (!intel_enable_rc6(dev_priv->dev) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1) || - (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) || - (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0))) + if (!intel_enable_rc6(dev) || + NEEDS_WaRsDisableCoarsePowerGating(dev)) data[1] = 0; else /* bit 0 and 1 are for Render and Media domain separately */ @@ -246,6 +244,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) db_exc.cookie = 1; } + /* Finally, update the cached copy of the GuC's WQ head */ + gc->wq_head = desc->head; + kunmap_atomic(base); return ret; } @@ -375,6 +376,8 @@ static void guc_init_proc_desc(struct intel_guc *guc, static void guc_init_ctx_desc(struct intel_guc *guc, struct i915_guc_client *client) { + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_engine_cs *ring; struct intel_context *ctx = client->owner; struct guc_context_desc desc; struct sg_table *sg; @@ -387,10 +390,8 
@@ static void guc_init_ctx_desc(struct intel_guc *guc, desc.priority = client->priority; desc.db_id = client->doorbell_id; - for (i = 0; i < I915_NUM_RINGS; i++) { - struct guc_execlist_context *lrc = &desc.lrc[i]; - struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf; - struct intel_engine_cs *ring; + for_each_ring(ring, dev_priv, i) { + struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id]; struct drm_i915_gem_object *obj; uint64_t ctx_desc; @@ -405,7 +406,6 @@ static void guc_init_ctx_desc(struct intel_guc *guc, if (!obj) break; /* XXX: continue? */ - ring = ringbuf->ring; ctx_desc = intel_lr_context_descriptor(ctx, ring); lrc->context_desc = (u32)ctx_desc; @@ -413,16 +413,16 @@ static void guc_init_ctx_desc(struct intel_guc *guc, lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) + LRC_STATE_PN * PAGE_SIZE; lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | - (ring->id << GUC_ELC_ENGINE_OFFSET); + (ring->guc_id << GUC_ELC_ENGINE_OFFSET); - obj = ringbuf->obj; + obj = ctx->engine[i].ringbuf->obj; lrc->ring_begin = i915_gem_obj_ggtt_offset(obj); lrc->ring_end = lrc->ring_begin + obj->base.size - 1; lrc->ring_next_free_location = lrc->ring_begin; lrc->ring_current_tail_pointer_value = 0; - desc.engines_used |= (1 << ring->id); + desc.engines_used |= (1 << ring->guc_id); } WARN_ON(desc.engines_used == 0); @@ -471,28 +471,30 @@ static void guc_fini_ctx_desc(struct intel_guc *guc, sizeof(desc) * client->ctx_index); } -/* Get valid workqueue item and return it back to offset */ -static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset) +int i915_guc_wq_check_space(struct i915_guc_client *gc) { struct guc_process_desc *desc; void *base; u32 size = sizeof(struct guc_wq_item); int ret = -ETIMEDOUT, timeout_counter = 200; + if (!gc) + return 0; + + /* Quickly return if wq space is available since last time we cache the + * head position. */ + if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) + return 0; + base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0)); desc = base + gc->proc_desc_offset; while (timeout_counter-- > 0) { - if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) { - *offset = gc->wq_tail; + gc->wq_head = desc->head; - /* advance the tail for next workqueue item */ - gc->wq_tail += size; - gc->wq_tail &= gc->wq_size - 1; - - /* this will break the loop */ - timeout_counter = 0; + if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) { ret = 0; + break; } if (timeout_counter) @@ -507,15 +509,18 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset) static int guc_add_workqueue_item(struct i915_guc_client *gc, struct drm_i915_gem_request *rq) { - enum intel_ring_id ring_id = rq->ring->id; struct guc_wq_item *wqi; void *base; - u32 tail, wq_len, wq_off = 0; - int ret; + u32 tail, wq_len, wq_off, space; + + space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size); + if (WARN_ON(space < sizeof(struct guc_wq_item))) + return -ENOSPC; /* shouldn't happen */ - ret = guc_get_workqueue_space(gc, &wq_off); - if (ret) - return ret; + /* postincrement WQ tail for next time */ + wq_off = gc->wq_tail; + gc->wq_tail += sizeof(struct guc_wq_item); + gc->wq_tail &= gc->wq_size - 1; /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. 
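i915_guc_wq_check_space() above avoids mapping and re-reading the shared process descriptor on every submission by caching the GuC's last-seen work queue head (guc_ring_doorbell refreshes that cache once it has the descriptor mapped anyway); only when the cached head no longer shows enough room is the descriptor consulted again. Below is a standalone sketch of the same fast-path/slow-path idea using the classic power-of-two circular buffer arithmetic; the macros mirror CIRC_CNT/CIRC_SPACE from the kernel's <linux/circ_buf.h>, and struct wq_state plus shared_head are invented for the example.

#include <stdbool.h>

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

/* In the real driver the consumer's head lives in memory shared with the
 * GuC and must be mapped to read; a plain variable stands in for it here. */
static unsigned int shared_head;

struct wq_state {
	unsigned int tail;        /* producer: next write offset */
	unsigned int cached_head; /* last head value read from shared memory */
	unsigned int size;        /* power of two */
};

static bool wq_has_space(struct wq_state *wq, unsigned int item_size)
{
	/* Fast path: the cached head already proves there is room. */
	if (CIRC_SPACE(wq->tail, wq->cached_head, wq->size) >= item_size)
		return true;

	/* Slow path: refresh the cached head and check once more. */
	wq->cached_head = shared_head;
	return CIRC_SPACE(wq->tail, wq->cached_head, wq->size) >= item_size;
}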
So we * should not have the case where structure wqi is across page, neither @@ -537,7 +542,7 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc, wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1; wqi->header = WQ_TYPE_INORDER | (wq_len << WQ_LEN_SHIFT) | - (ring_id << WQ_TARGET_SHIFT) | + (rq->ring->guc_id << WQ_TARGET_SHIFT) | WQ_NO_WCFLUSH_WAIT; /* The GuC wants only the low-order word of the context descriptor */ @@ -553,29 +558,6 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc, return 0; } -#define CTX_RING_BUFFER_START 0x08 - -/* Update the ringbuffer pointer in a saved context image */ -static void lr_context_update(struct drm_i915_gem_request *rq) -{ - enum intel_ring_id ring_id = rq->ring->id; - struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state; - struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj; - struct page *page; - uint32_t *reg_state; - - BUG_ON(!ctx_obj); - WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); - WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); - - page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); - reg_state = kmap_atomic(page); - - reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj); - - kunmap_atomic(reg_state); -} - /** * i915_guc_submit() - Submit commands through GuC * @client: the guc client where commands will go through @@ -587,18 +569,14 @@ int i915_guc_submit(struct i915_guc_client *client, struct drm_i915_gem_request *rq) { struct intel_guc *guc = client->guc; - enum intel_ring_id ring_id = rq->ring->id; + unsigned int engine_id = rq->ring->guc_id; int q_ret, b_ret; - /* Need this because of the deferred pin ctx and ring */ - /* Shall we move this right after ring is pinned? */ - lr_context_update(rq); - q_ret = guc_add_workqueue_item(client, rq); if (q_ret == 0) b_ret = guc_ring_doorbell(client); - client->submissions[ring_id] += 1; + client->submissions[engine_id] += 1; if (q_ret) { client->q_fail += 1; client->retcode = q_ret; @@ -608,8 +586,8 @@ int i915_guc_submit(struct i915_guc_client *client, } else { client->retcode = 0; } - guc->submissions[ring_id] += 1; - guc->last_seqno[ring_id] = rq->seqno; + guc->submissions[engine_id] += 1; + guc->last_seqno[engine_id] = rq->seqno; return q_ret; } @@ -832,6 +810,96 @@ static void guc_create_log(struct intel_guc *guc) guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags; } +static void init_guc_policies(struct guc_policies *policies) +{ + struct guc_policy *policy; + u32 p, i; + + policies->dpc_promote_time = 500000; + policies->max_num_work_items = POLICY_MAX_NUM_WI; + + for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) { + for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) { + policy = &policies->policy[p][i]; + + policy->execution_quantum = 1000000; + policy->preemption_time = 500000; + policy->fault_time = 250000; + policy->policy_flags = 0; + } + } + + policies->is_valid = 1; +} + +static void guc_create_ads(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_gem_object *obj; + struct guc_ads *ads; + struct guc_policies *policies; + struct guc_mmio_reg_state *reg_state; + struct intel_engine_cs *ring; + struct page *page; + u32 size, i; + + /* The ads obj includes the struct itself and buffers passed to GuC */ + size = sizeof(struct guc_ads) + sizeof(struct guc_policies) + + sizeof(struct guc_mmio_reg_state) + + GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE; + + obj = guc->ads_obj; + if (!obj) { + obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size)); + if (!obj) + return; + + 
guc->ads_obj = obj; + } + + page = i915_gem_object_get_page(obj, 0); + ads = kmap(page); + + /* + * The GuC requires a "Golden Context" when it reinitialises + * engines after a reset. Here we use the Render ring default + * context, which must already exist and be pinned in the GGTT, + * so its address won't change after we've told the GuC where + * to find it. + */ + ring = &dev_priv->ring[RCS]; + ads->golden_context_lrca = ring->status_page.gfx_addr; + + for_each_ring(ring, dev_priv, i) + ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring); + + /* GuC scheduling policies */ + policies = (void *)ads + sizeof(struct guc_ads); + init_guc_policies(policies); + + ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) + + sizeof(struct guc_ads); + + /* MMIO reg state */ + reg_state = (void *)policies + sizeof(struct guc_policies); + + for_each_ring(ring, dev_priv, i) { + reg_state->mmio_white_list[ring->guc_id].mmio_start = + ring->mmio_base + GUC_MMIO_WHITE_LIST_START; + + /* Nothing to be saved or restored for now. */ + reg_state->mmio_white_list[ring->guc_id].count = 0; + } + + ads->reg_state_addr = ads->scheduler_policies + + sizeof(struct guc_policies); + + ads->reg_state_buffer = ads->reg_state_addr + + sizeof(struct guc_mmio_reg_state); + + kunmap(page); +} + /* * Set up the memory resources to be shared with the GuC. At this point, * we require just one object that can be mapped through the GGTT. @@ -858,6 +926,8 @@ int i915_guc_submission_init(struct drm_device *dev) guc_create_log(guc); + guc_create_ads(guc); + return 0; } @@ -865,7 +935,7 @@ int i915_guc_submission_enable(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; - struct intel_context *ctx = dev_priv->ring[RCS].default_context; + struct intel_context *ctx = dev_priv->kernel_context; struct i915_guc_client *client; /* client for execbuf submission */ @@ -896,6 +966,9 @@ void i915_guc_submission_fini(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; + gem_release_guc_obj(dev_priv->guc.ads_obj); + guc->ads_obj = NULL; + gem_release_guc_obj(dev_priv->guc.log_obj); guc->log_obj = NULL; @@ -919,7 +992,7 @@ int intel_guc_suspend(struct drm_device *dev) if (!i915.enable_guc_submission) return 0; - ctx = dev_priv->ring[RCS].default_context; + ctx = dev_priv->kernel_context; data[0] = HOST2GUC_ACTION_ENTER_S_STATE; /* any value greater than GUC_POWER_D0 */ @@ -945,7 +1018,7 @@ int intel_guc_resume(struct drm_device *dev) if (!i915.enable_guc_submission) return 0; - ctx = dev_priv->ring[RCS].default_context; + ctx = dev_priv->kernel_context; data[0] = HOST2GUC_ACTION_EXIT_S_STATE; data[1] = GUC_POWER_D0; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index fa8afa7860ae..25a89373df63 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2188,10 +2188,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) /* IRQs are synced during runtime_suspend, we don't require a wakeref */ disable_rpm_wakeref_asserts(dev_priv); - /* We get interrupts on unclaimed registers, so check for this before we - * do any I915_{READ,WRITE}. 
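guc_create_ads() above packs everything the firmware needs — the ADS structure itself, the scheduling policies, the MMIO register state and the S3 save area — into a single GEM object, then records where each piece starts so the GuC can locate it by offset. The toy program below shows the same carve-up-one-allocation-with-running-offsets pattern; the three structs and SCRATCH_BYTES are deliberately simplified placeholders, not the real GuC definitions.

#include <stdio.h>

/* Much-reduced stand-ins for the firmware-visible blocks. */
struct ads       { unsigned int policies_offset, reg_state_offset, scratch_offset; };
struct policies  { unsigned int quantum_us, preempt_us; };
struct reg_state { unsigned int reg_count; };

#define SCRATCH_BYTES 4096u

int main(void)
{
	unsigned int base = 0x100000; /* e.g. the object's GGTT offset */
	unsigned int off = 0;
	struct ads ads;

	off += (unsigned int)sizeof(struct ads);	/* the ADS sits first */
	ads.policies_offset = base + off;
	off += (unsigned int)sizeof(struct policies);
	ads.reg_state_offset = base + off;
	off += (unsigned int)sizeof(struct reg_state);
	ads.scratch_offset = base + off;
	off += SCRATCH_BYTES;

	printf("policies at 0x%x, reg state at 0x%x, scratch at 0x%x, total %u bytes\n",
	       ads.policies_offset, ads.reg_state_offset, ads.scratch_offset, off);
	return 0;
}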
*/ - intel_uncore_check_errors(dev); - /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); @@ -2268,43 +2264,20 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, intel_hpd_irq_handler(dev, pin_mask, long_mask); } -static irqreturn_t gen8_irq_handler(int irq, void *arg) +static irqreturn_t +gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 master_ctl; + struct drm_device *dev = dev_priv->dev; irqreturn_t ret = IRQ_NONE; - uint32_t tmp = 0; + u32 iir; enum pipe pipe; - u32 aux_mask = GEN8_AUX_CHANNEL_A; - - if (!intel_irqs_enabled(dev_priv)) - return IRQ_NONE; - - /* IRQs are synced during runtime_suspend, we don't require a wakeref */ - disable_rpm_wakeref_asserts(dev_priv); - - if (INTEL_INFO(dev_priv)->gen >= 9) - aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | - GEN9_AUX_CHANNEL_D; - - master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); - master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; - if (!master_ctl) - goto out; - - I915_WRITE_FW(GEN8_MASTER_IRQ, 0); - - /* Find, clear, then process each source of interrupt */ - - ret = gen8_gt_irq_handler(dev_priv, master_ctl); if (master_ctl & GEN8_DE_MISC_IRQ) { - tmp = I915_READ(GEN8_DE_MISC_IIR); - if (tmp) { - I915_WRITE(GEN8_DE_MISC_IIR, tmp); + iir = I915_READ(GEN8_DE_MISC_IIR); + if (iir) { + I915_WRITE(GEN8_DE_MISC_IIR, iir); ret = IRQ_HANDLED; - if (tmp & GEN8_DE_MISC_GSE) + if (iir & GEN8_DE_MISC_GSE) intel_opregion_asle_intr(dev); else DRM_ERROR("Unexpected DE Misc interrupt\n"); @@ -2314,33 +2287,40 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) } if (master_ctl & GEN8_DE_PORT_IRQ) { - tmp = I915_READ(GEN8_DE_PORT_IIR); - if (tmp) { + iir = I915_READ(GEN8_DE_PORT_IIR); + if (iir) { + u32 tmp_mask; bool found = false; - u32 hotplug_trigger = 0; - - if (IS_BROXTON(dev_priv)) - hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK; - else if (IS_BROADWELL(dev_priv)) - hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG; - I915_WRITE(GEN8_DE_PORT_IIR, tmp); + I915_WRITE(GEN8_DE_PORT_IIR, iir); ret = IRQ_HANDLED; - if (tmp & aux_mask) { + tmp_mask = GEN8_AUX_CHANNEL_A; + if (INTEL_INFO(dev_priv)->gen >= 9) + tmp_mask |= GEN9_AUX_CHANNEL_B | + GEN9_AUX_CHANNEL_C | + GEN9_AUX_CHANNEL_D; + + if (iir & tmp_mask) { dp_aux_irq_handler(dev); found = true; } - if (hotplug_trigger) { - if (IS_BROXTON(dev)) - bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt); - else - ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw); - found = true; + if (IS_BROXTON(dev_priv)) { + tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; + if (tmp_mask) { + bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt); + found = true; + } + } else if (IS_BROADWELL(dev_priv)) { + tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; + if (tmp_mask) { + ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw); + found = true; + } } - if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) { + if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) { gmbus_irq_handler(dev); found = true; } @@ -2353,49 +2333,51 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) } for_each_pipe(dev_priv, pipe) { - uint32_t pipe_iir, flip_done = 0, fault_errors = 0; + u32 flip_done, fault_errors; if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) continue; - pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); - if (pipe_iir) { - ret = IRQ_HANDLED; - I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); + iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); + if (!iir) 
{ + DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); + continue; + } - if (pipe_iir & GEN8_PIPE_VBLANK && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + ret = IRQ_HANDLED; + I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); - if (INTEL_INFO(dev_priv)->gen >= 9) - flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; - else - flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; + if (iir & GEN8_PIPE_VBLANK && + intel_pipe_handle_vblank(dev, pipe)) + intel_check_page_flip(dev, pipe); - if (flip_done) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + flip_done = iir; + if (INTEL_INFO(dev_priv)->gen >= 9) + flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE; + else + flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; - if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) - hsw_pipe_crc_irq_handler(dev, pipe); + if (flip_done) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip_plane(dev, pipe); + } - if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) - intel_cpu_fifo_underrun_irq_handler(dev_priv, - pipe); + if (iir & GEN8_PIPE_CDCLK_CRC_DONE) + hsw_pipe_crc_irq_handler(dev, pipe); + if (iir & GEN8_PIPE_FIFO_UNDERRUN) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - if (INTEL_INFO(dev_priv)->gen >= 9) - fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; - else - fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; + fault_errors = iir; + if (INTEL_INFO(dev_priv)->gen >= 9) + fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; + else + fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; - if (fault_errors) - DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", - pipe_name(pipe), - pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); - } else - DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); + if (fault_errors) + DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", + pipe_name(pipe), + fault_errors); } if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) && @@ -2405,15 +2387,15 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) * scheme also closed the SDE interrupt handling race we've seen * on older pch-split platforms. But this needs testing. 
*/ - u32 pch_iir = I915_READ(SDEIIR); - if (pch_iir) { - I915_WRITE(SDEIIR, pch_iir); + iir = I915_READ(SDEIIR); + if (iir) { + I915_WRITE(SDEIIR, iir); ret = IRQ_HANDLED; if (HAS_PCH_SPT(dev_priv)) - spt_irq_handler(dev, pch_iir); + spt_irq_handler(dev, iir); else - cpt_irq_handler(dev, pch_iir); + cpt_irq_handler(dev, iir); } else { /* * Like on previous PCH there seems to be something @@ -2423,10 +2405,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) } } + return ret; +} + +static irqreturn_t gen8_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = arg; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 master_ctl; + irqreturn_t ret; + + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + + master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); + master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; + if (!master_ctl) + return IRQ_NONE; + + I915_WRITE_FW(GEN8_MASTER_IRQ, 0); + + /* IRQs are synced during runtime_suspend, we don't require a wakeref */ + disable_rpm_wakeref_asserts(dev_priv); + + /* Find, clear, then process each source of interrupt */ + ret = gen8_gt_irq_handler(dev_priv, master_ctl); + ret |= gen8_de_irq_handler(dev_priv, master_ctl); + I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); POSTING_READ_FW(GEN8_MASTER_IRQ); -out: enable_rpm_wakeref_asserts(dev_priv); return ret; @@ -2949,14 +2957,44 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) ring->hangcheck.deadlock = 0; } -static enum intel_ring_hangcheck_action -ring_stuck(struct intel_engine_cs *ring, u64 acthd) +static bool subunits_stuck(struct intel_engine_cs *ring) { - struct drm_device *dev = ring->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 tmp; + u32 instdone[I915_NUM_INSTDONE_REG]; + bool stuck; + int i; + + if (ring->id != RCS) + return true; + + i915_get_extra_instdone(ring->dev, instdone); + + /* There might be unstable subunit states even when + * actual head is not moving. Filter out the unstable ones by + * accumulating the undone -> done transitions and only + * consider those as progress. + */ + stuck = true; + for (i = 0; i < I915_NUM_INSTDONE_REG; i++) { + const u32 tmp = instdone[i] | ring->hangcheck.instdone[i]; + + if (tmp != ring->hangcheck.instdone[i]) + stuck = false; + + ring->hangcheck.instdone[i] |= tmp; + } + return stuck; +} + +static enum intel_ring_hangcheck_action +head_stuck(struct intel_engine_cs *ring, u64 acthd) +{ if (acthd != ring->hangcheck.acthd) { + + /* Clear subunit states on head movement */ + memset(ring->hangcheck.instdone, 0, + sizeof(ring->hangcheck.instdone)); + if (acthd > ring->hangcheck.max_acthd) { ring->hangcheck.max_acthd = acthd; return HANGCHECK_ACTIVE; @@ -2965,6 +3003,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd) return HANGCHECK_ACTIVE_LOOP; } + if (!subunits_stuck(ring)) + return HANGCHECK_ACTIVE; + + return HANGCHECK_HUNG; +} + +static enum intel_ring_hangcheck_action +ring_stuck(struct intel_engine_cs *ring, u64 acthd) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum intel_ring_hangcheck_action ha; + u32 tmp; + + ha = head_stuck(ring, acthd); + if (ha != HANGCHECK_HUNG) + return ha; + if (IS_GEN2(dev)) return HANGCHECK_HUNG; @@ -3032,6 +3088,12 @@ static void i915_hangcheck_elapsed(struct work_struct *work) */ DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); + /* As enabling the GPU requires fairly extensive mmio access, + * periodically arm the mmio checker to see if we are triggering + * any invalid access. 
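subunits_stuck() above filters out unstable sub-unit state by OR-accumulating the INSTDONE samples taken between hang checks: a check only reports the engine as stuck if the fresh sample adds no bits beyond what was already accumulated, and the accumulator is cleared whenever the head or seqno moves. The standalone sketch below reduces the register set to a single word to show just that accumulation test.

#include <stdbool.h>
#include <stdint.h>

struct hangcheck {
	uint32_t accumulated;	/* "done" bits seen since the head last moved */
};

/* True if this sample shows no forward progress: every bit in the new
 * snapshot was already present in the accumulator. */
static bool subunits_stuck(struct hangcheck *hc, uint32_t instdone_sample)
{
	uint32_t merged = instdone_sample | hc->accumulated;
	bool stuck = (merged == hc->accumulated);

	hc->accumulated = merged;
	return stuck;
}

/* Called on head/seqno movement so the next stall starts from scratch. */
static void hangcheck_note_progress(struct hangcheck *hc)
{
	hc->accumulated = 0;
}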
+ */ + intel_uncore_arm_unclaimed_mmio_detection(dev_priv); + for_each_ring(ring, dev_priv, i) { u64 acthd; u32 seqno; @@ -3106,7 +3168,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (ring->hangcheck.score > 0) ring->hangcheck.score--; + /* Clear head and subunit states on seqno movement */ ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; + + memset(ring->hangcheck.instdone, 0, + sizeof(ring->hangcheck.instdone)); } ring->hangcheck.seqno = seqno; diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 835d6099c769..8b9f36814165 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -22,6 +22,7 @@ * IN THE SOFTWARE. */ +#include "i915_params.h" #include "i915_drv.h" struct i915_params i915 __read_mostly = { @@ -126,7 +127,8 @@ MODULE_PARM_DESC(enable_execlists, "(-1=auto [default], 0=disabled, 1=enabled)"); module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600); -MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); +MODULE_PARM_DESC(enable_psr, "Enable PSR " + "(0=disabled [default], 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode)"); module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600); MODULE_PARM_DESC(preliminary_hw_support, diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h new file mode 100644 index 000000000000..529929073120 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_params.h @@ -0,0 +1,68 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef _I915_PARAMS_H_ +#define _I915_PARAMS_H_ + +#include <linux/cache.h> /* for __read_mostly */ + +struct i915_params { + int modeset; + int panel_ignore_lid; + int semaphores; + int lvds_channel_mode; + int panel_use_ssc; + int vbt_sdvo_panel_type; + int enable_rc6; + int enable_dc; + int enable_fbc; + int enable_ppgtt; + int enable_execlists; + int enable_psr; + unsigned int preliminary_hw_support; + int disable_power_well; + int enable_ips; + int invert_brightness; + int enable_cmd_parser; + int guc_log_level; + int use_mmio_flip; + int mmio_debug; + int edp_vswing; + /* leave bools at the end to not create holes */ + bool enable_hangcheck; + bool fastboot; + bool prefault_disable; + bool load_detect_test; + bool reset; + bool disable_display; + bool disable_vtd_wa; + bool enable_guc_submission; + bool verbose_state_checks; + bool nuclear_pageflip; +}; + +extern struct i915_params i915 __read_mostly; + +#endif + diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 007ae83a4086..144586ee74d5 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -610,16 +610,17 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define IOSF_BYTE_ENABLES_SHIFT 4 #define IOSF_BAR_SHIFT 1 #define IOSF_SB_BUSY (1<<0) -#define IOSF_PORT_BUNIT 0x3 -#define IOSF_PORT_PUNIT 0x4 +#define IOSF_PORT_BUNIT 0x03 +#define IOSF_PORT_PUNIT 0x04 #define IOSF_PORT_NC 0x11 #define IOSF_PORT_DPIO 0x12 -#define IOSF_PORT_DPIO_2 0x1a #define IOSF_PORT_GPIO_NC 0x13 #define IOSF_PORT_CCK 0x14 -#define IOSF_PORT_CCU 0xA9 -#define IOSF_PORT_GPS_CORE 0x48 -#define IOSF_PORT_FLISDSI 0x1B +#define IOSF_PORT_DPIO_2 0x1a +#define IOSF_PORT_FLISDSI 0x1b +#define IOSF_PORT_GPIO_SC 0x48 +#define IOSF_PORT_GPIO_SUS 0xa8 +#define IOSF_PORT_CCU 0xa9 #define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104) #define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108) @@ -1635,6 +1636,9 @@ enum skl_disp_power_wells { #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ +#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4) +#define RING_MAX_NONPRIV_SLOTS 12 + #define GEN7_TLB_RD_ADDR _MMIO(0x4700) #if 0 @@ -1711,6 +1715,11 @@ enum skl_disp_power_wells { #define FPGA_DBG _MMIO(0x42300) #define FPGA_DBG_RM_NOCLAIM (1<<31) +#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028) +#define CLAIM_ER_CLR (1 << 31) +#define CLAIM_ER_OVERFLOW (1 << 16) +#define CLAIM_ER_CTR_MASK 0xffff + #define DERRMR _MMIO(0x44050) /* Note that HBLANK events are reserved on bdw+ */ #define DERRMR_PIPEA_SCANLINE (1<<0) @@ -5940,6 +5949,7 @@ enum skl_disp_power_wells { #define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31) #define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30) #define ILK_DISPLAY_DEBUG_DISABLE (1 << 29) +#define IVB_PIPE_C_DISABLE (1 << 28) #define ILK_HDCP_DISABLE (1 << 25) #define ILK_eDP_A_DISABLE (1 << 24) #define HSW_CDCLK_LIMIT (1 << 24) @@ -5986,10 +5996,19 @@ enum skl_disp_power_wells { #define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23) #define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23) #define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23) +#define SKL_DFSM_PIPE_A_DISABLE (1 << 30) +#define SKL_DFSM_PIPE_B_DISABLE (1 << 21) +#define SKL_DFSM_PIPE_C_DISABLE (1 << 28) + +#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0) +#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14) #define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4) #define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) +#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) +#define GEN8_CS_CHICKEN1 _MMIO(0x2580) + /* GEN7 chicken */ 
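The new i915_params.h above ends struct i915_params with the note "leave bools at the end to not create holes": keeping the one-byte members together after the ints means the compiler does not have to insert padding between them. The short program below demonstrates the effect with two hypothetical layouts; exact sizes are ABI-dependent, but on common 64-bit ABIs the interleaved version is measurably larger.

#include <stdbool.h>
#include <stdio.h>

struct params_interleaved {	/* bools scattered between ints */
	bool a;
	int  x;
	bool b;
	int  y;
	bool c;
};

struct params_packed {		/* ints first, bools at the end */
	int  x, y;
	bool a, b, c;
};

int main(void)
{
	printf("interleaved: %zu bytes\n", sizeof(struct params_interleaved));
	printf("packed:      %zu bytes\n", sizeof(struct params_packed));
	return 0;
}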
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010) # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) @@ -6035,6 +6054,8 @@ enum skl_disp_power_wells { #define HDC_FORCE_NON_COHERENT (1<<4) #define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10) +#define GEN8_HDC_CHICKEN1 _MMIO(0x7304) + /* GEN9 chicken */ #define SLICE_ECO_CHICKEN0 _MMIO(0x7308) #define PIXEL_MASK_CAMMING_DISABLE (1 << 14) @@ -6765,6 +6786,16 @@ enum skl_disp_power_wells { #define VLV_PMWGICZ _MMIO(0x1300a4) +#define RC6_LOCATION _MMIO(0xD40) +#define RC6_CTX_IN_DRAM (1 << 0) +#define RC6_CTX_BASE _MMIO(0xD48) +#define RC6_CTX_BASE_MASK 0xFFFFFFF0 +#define PWRCTX_MAXCNT_RCSUNIT _MMIO(0x2054) +#define PWRCTX_MAXCNT_VCSUNIT0 _MMIO(0x12054) +#define PWRCTX_MAXCNT_BCSUNIT _MMIO(0x22054) +#define PWRCTX_MAXCNT_VECSUNIT _MMIO(0x1A054) +#define PWRCTX_MAXCNT_VCSUNIT1 _MMIO(0x1C054) +#define IDLE_TIME_MASK 0xFFFFF #define FORCEWAKE _MMIO(0xA18C) #define FORCEWAKE_VLV _MMIO(0x1300b0) #define FORCEWAKE_ACK_VLV _MMIO(0x1300b4) @@ -6903,6 +6934,7 @@ enum skl_disp_power_wells { #define GEN6_RPDEUC _MMIO(0xA084) #define GEN6_RPDEUCSW _MMIO(0xA088) #define GEN6_RC_STATE _MMIO(0xA094) +#define RC6_STATE (1 << 18) #define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098) #define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C) #define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0) @@ -7514,7 +7546,7 @@ enum skl_disp_power_wells { #define DPLL_CFGCR2_PDIV_7 (4<<2) #define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3) -#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR2) +#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1) #define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2) /* BXT display engine PLL */ @@ -8154,4 +8186,11 @@ enum skl_disp_power_wells { #define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */ #define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */ +/* gamt regs */ +#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4) +#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */ +#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */ +#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */ +#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */ + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index a2aa09ce3202..34e061a9ef06 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -49,7 +49,7 @@ static void i915_save_display(struct drm_device *dev) dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); - } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { + } else if (INTEL_INFO(dev)->gen <= 4) { dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); @@ -84,7 +84,7 @@ static void i915_restore_display(struct drm_device *dev) I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); - } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { + } else if (INTEL_INFO(dev)->gen <= 4) { I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); I915_WRITE(PP_OFF_DELAYS, 
dev_priv->regfile.savePP_OFF_DELAYS); I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); @@ -92,7 +92,7 @@ static void i915_restore_display(struct drm_device *dev) } /* only restore FBC info on the platform that supports FBC*/ - intel_fbc_disable(dev_priv); + intel_fbc_global_disable(dev_priv); /* restore FBC interval */ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 37e3f0ddf8e0..c6188dddb341 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -164,7 +164,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t count) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct drm_minor *dminor = dev_to_drm_minor(dev); struct drm_device *drm_dev = dminor->dev; struct drm_i915_private *dev_priv = drm_dev->dev_private; @@ -200,7 +200,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t count) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct drm_minor *dminor = dev_to_drm_minor(dev); struct drm_device *drm_dev = dminor->dev; struct drm_i915_private *dev_priv = drm_dev->dev_private; @@ -521,7 +521,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, loff_t off, size_t count) { - struct device *kdev = container_of(kobj, struct device, kobj); + struct device *kdev = kobj_to_dev(kobj); struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; struct i915_error_state_file_priv error_priv; @@ -556,7 +556,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { - struct device *kdev = container_of(kobj, struct device, kobj); + struct device *kdev = kobj_to_dev(kobj); struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; int ret; diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index d0b1c9afa35e..4625f8a9ba12 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -308,5 +308,5 @@ void intel_atomic_state_clear(struct drm_atomic_state *s) { struct intel_atomic_state *state = to_intel_atomic_state(s); drm_atomic_state_default_clear(&state->base); - state->dpll_set = false; + state->dpll_set = state->modeset = false; } diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index c6bb0fc1edfb..e0b851a0004a 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -152,9 +152,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane, intel_state->clip.x1 = 0; intel_state->clip.y1 = 0; intel_state->clip.x2 = - crtc_state->base.active ? crtc_state->pipe_src_w : 0; + crtc_state->base.enable ? crtc_state->pipe_src_w : 0; intel_state->clip.y2 = - crtc_state->base.active ? crtc_state->pipe_src_h : 0; + crtc_state->base.enable ? 
crtc_state->pipe_src_h : 0; if (state->fb && intel_rotation_90_or_270(state->rotation)) { if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || @@ -194,8 +194,16 @@ static void intel_plane_atomic_update(struct drm_plane *plane, struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_plane_state *intel_state = to_intel_plane_state(plane->state); + struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc; + struct drm_crtc_state *crtc_state = + drm_atomic_get_existing_crtc_state(old_state->state, crtc); - intel_plane->commit_plane(plane, intel_state); + if (intel_state->visible) + intel_plane->update_plane(plane, + to_intel_crtc_state(crtc_state), + intel_state); + else + intel_plane->disable_plane(plane, crtc); } const struct drm_plane_helper_funcs intel_plane_helper_funcs = { diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index eba3e0f87181..bf62a19c8f69 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -31,11 +31,49 @@ #include "i915_drv.h" #include "intel_bios.h" +/** + * DOC: Video BIOS Table (VBT) + * + * The Video BIOS Table, or VBT, provides platform and board specific + * configuration information to the driver that is not discoverable or available + * through other means. The configuration is mostly related to display + * hardware. The VBT is available via the ACPI OpRegion or, on older systems, in + * the PCI ROM. + * + * The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB + * Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that + * contain the actual configuration information. The VBT Header, and thus the + * VBT, begins with "$VBT" signature. The VBT Header contains the offset of the + * BDB Header. The data blocks are concatenated after the BDB Header. The data + * blocks have a 1-byte Block ID, 2-byte Block Size, and Block Size bytes of + * data. (Block 53, the MIPI Sequence Block is an exception.) + * + * The driver parses the VBT during load. The relevant information is stored in + * driver private data for ease of use, and the actual VBT is not read after + * that. + */ + #define SLAVE_ADDR1 0x70 #define SLAVE_ADDR2 0x72 static int panel_type; +/* Get BDB block size given a pointer to Block ID. */ +static u32 _get_blocksize(const u8 *block_base) +{ + /* The MIPI Sequence Block v3+ has a separate size field. */ + if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3) + return *((const u32 *)(block_base + 4)); + else + return *((const u16 *)(block_base + 1)); +} + +/* Get BDB block size give a pointer to data after Block ID and Block Size. */ +static u32 get_blocksize(const void *block_data) +{ + return _get_blocksize(block_data - 3); +} + static const void * find_section(const void *_bdb, int section_id) { @@ -52,14 +90,8 @@ find_section(const void *_bdb, int section_id) /* walk the sections looking for section_id */ while (index + 3 < total) { current_id = *(base + index); - index++; - - current_size = *((const u16 *)(base + index)); - index += 2; - - /* The MIPI Sequence Block v3+ has a separate size field. 
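The DOC comment and the reworked _get_blocksize()/find_section() above describe how BIOS Data Blocks are laid out after the BDB header: each block is a 1-byte ID followed by a 2-byte size and then that many bytes of data, with the MIPI sequence block v3+ carrying an extra 32-bit size of its own. A hedged standalone sketch of walking such ID/size-prefixed blocks with bounds checking is below; it assumes a little-endian host, omits the MIPI special case, and uses invented names (find_block, block_size) rather than the driver's.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Size of a generic BDB block: a 1-byte ID, then a 2-byte little-endian
 * size.  (The real parser special-cases the MIPI sequence block v3+.) */
static uint32_t block_size(const uint8_t *block)
{
	uint16_t sz;

	memcpy(&sz, block + 1, sizeof(sz));	/* unaligned-safe read */
	return sz;
}

/* Return a pointer to the data of the block with the given ID, or NULL. */
static const uint8_t *find_block(const uint8_t *bdb_data, size_t total,
				 uint8_t wanted_id)
{
	size_t index = 0;

	while (index + 3 <= total) {
		uint8_t id = bdb_data[index];
		uint32_t size = block_size(bdb_data + index);

		index += 3;
		if (index + size > total)	/* truncated block */
			return NULL;
		if (id == wanted_id)
			return bdb_data + index;
		index += size;
	}
	return NULL;
}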
*/ - if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3) - current_size = *((const u32 *)(base + index + 1)); + current_size = _get_blocksize(base + index); + index += 3; if (index + current_size > total) return NULL; @@ -73,16 +105,6 @@ find_section(const void *_bdb, int section_id) return NULL; } -static u16 -get_blocksize(const void *p) -{ - u16 *block_ptr, block_size; - - block_ptr = (u16 *)((char *)p - 2); - block_size = *block_ptr; - return block_size; -} - static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, const struct lvds_dvo_timing *dvo_timing) @@ -675,84 +697,13 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time; } -static u8 *goto_next_sequence(u8 *data, int *size) -{ - u16 len; - int tmp = *size; - - if (--tmp < 0) - return NULL; - - /* goto first element */ - data++; - while (1) { - switch (*data) { - case MIPI_SEQ_ELEM_SEND_PKT: - /* - * skip by this element payload size - * skip elem id, command flag and data type - */ - tmp -= 5; - if (tmp < 0) - return NULL; - - data += 3; - len = *((u16 *)data); - - tmp -= len; - if (tmp < 0) - return NULL; - - /* skip by len */ - data = data + 2 + len; - break; - case MIPI_SEQ_ELEM_DELAY: - /* skip by elem id, and delay is 4 bytes */ - tmp -= 5; - if (tmp < 0) - return NULL; - - data += 5; - break; - case MIPI_SEQ_ELEM_GPIO: - tmp -= 3; - if (tmp < 0) - return NULL; - - data += 3; - break; - default: - DRM_ERROR("Unknown element\n"); - return NULL; - } - - /* end of sequence ? */ - if (*data == 0) - break; - } - - /* goto next sequence or end of block byte */ - if (--tmp < 0) - return NULL; - - data++; - - /* update amount of data left for the sequence block to be parsed */ - *size = tmp; - return data; -} - static void -parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) +parse_mipi_config(struct drm_i915_private *dev_priv, + const struct bdb_header *bdb) { const struct bdb_mipi_config *start; - const struct bdb_mipi_sequence *sequence; const struct mipi_config *config; const struct mipi_pps_data *pps; - u8 *data; - const u8 *seq_data; - int i, panel_id, seq_size; - u16 block_size; /* parse MIPI blocks only if LFP type is MIPI */ if (!dev_priv->vbt.has_mipi) @@ -798,104 +749,233 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) /* We have mandatory mipi config blocks. Initialize as generic panel */ dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; +} - /* Check if we have sequence block as well */ - sequence = find_section(bdb, BDB_MIPI_SEQUENCE); - if (!sequence) { - DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n"); - return; +/* Find the sequence block and size for the given panel. */ +static const u8 * +find_panel_sequence_block(const struct bdb_mipi_sequence *sequence, + u16 panel_id, u32 *seq_size) +{ + u32 total = get_blocksize(sequence); + const u8 *data = &sequence->data[0]; + u8 current_id; + u32 current_size; + int header_size = sequence->version >= 3 ? 
5 : 3; + int index = 0; + int i; + + /* skip new block size */ + if (sequence->version >= 3) + data += 4; + + for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) { + if (index + header_size > total) { + DRM_ERROR("Invalid sequence block (header)\n"); + return NULL; + } + + current_id = *(data + index); + if (sequence->version >= 3) + current_size = *((const u32 *)(data + index + 1)); + else + current_size = *((const u16 *)(data + index + 1)); + + index += header_size; + + if (index + current_size > total) { + DRM_ERROR("Invalid sequence block\n"); + return NULL; + } + + if (current_id == panel_id) { + *seq_size = current_size; + return data + index; + } + + index += current_size; } - /* Fail gracefully for forward incompatible sequence block. */ - if (sequence->version >= 3) { - DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n"); - return; + DRM_ERROR("Sequence block detected but no valid configuration\n"); + + return NULL; +} + +static int goto_next_sequence(const u8 *data, int index, int total) +{ + u16 len; + + /* Skip Sequence Byte. */ + for (index = index + 1; index < total; index += len) { + u8 operation_byte = *(data + index); + index++; + + switch (operation_byte) { + case MIPI_SEQ_ELEM_END: + return index; + case MIPI_SEQ_ELEM_SEND_PKT: + if (index + 4 > total) + return 0; + + len = *((const u16 *)(data + index + 2)) + 4; + break; + case MIPI_SEQ_ELEM_DELAY: + len = 4; + break; + case MIPI_SEQ_ELEM_GPIO: + len = 2; + break; + case MIPI_SEQ_ELEM_I2C: + if (index + 7 > total) + return 0; + len = *(data + index + 6) + 7; + break; + default: + DRM_ERROR("Unknown operation byte\n"); + return 0; + } } - DRM_DEBUG_DRIVER("Found MIPI sequence block\n"); + return 0; +} - block_size = get_blocksize(sequence); +static int goto_next_sequence_v3(const u8 *data, int index, int total) +{ + int seq_end; + u16 len; + u32 size_of_sequence; /* - * parse the sequence block for individual sequences + * Could skip sequence based on Size of Sequence alone, but also do some + * checking on the structure. */ - dev_priv->vbt.dsi.seq_version = sequence->version; + if (total < 5) { + DRM_ERROR("Too small sequence size\n"); + return 0; + } - seq_data = &sequence->data[0]; + /* Skip Sequence Byte. */ + index++; /* - * sequence block is variable length and hence we need to parse and - * get the sequence data for specific panel id + * Size of Sequence. Excludes the Sequence Byte and the size itself, + * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END + * byte. */ - for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) { - panel_id = *seq_data; - seq_size = *((u16 *) (seq_data + 1)); - if (panel_id == panel_type) - break; + size_of_sequence = *((const uint32_t *)(data + index)); + index += 4; - /* skip the sequence including seq header of 3 bytes */ - seq_data = seq_data + 3 + seq_size; - if ((seq_data - &sequence->data[0]) > block_size) { - DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n"); - return; + seq_end = index + size_of_sequence; + if (seq_end > total) { + DRM_ERROR("Invalid sequence size\n"); + return 0; + } + + for (; index < total; index += len) { + u8 operation_byte = *(data + index); + index++; + + if (operation_byte == MIPI_SEQ_ELEM_END) { + if (index != seq_end) { + DRM_ERROR("Invalid element structure\n"); + return 0; + } + return index; + } + + len = *(data + index); + index++; + + /* + * FIXME: Would be nice to check elements like for v1/v2 in + * goto_next_sequence() above. 
+ */ + switch (operation_byte) { + case MIPI_SEQ_ELEM_SEND_PKT: + case MIPI_SEQ_ELEM_DELAY: + case MIPI_SEQ_ELEM_GPIO: + case MIPI_SEQ_ELEM_I2C: + case MIPI_SEQ_ELEM_SPI: + case MIPI_SEQ_ELEM_PMIC: + break; + default: + DRM_ERROR("Unknown operation byte %u\n", + operation_byte); + break; } } - if (i == MAX_MIPI_CONFIGURATIONS) { - DRM_ERROR("Sequence block detected but no valid configuration\n"); + return 0; +} + +static void +parse_mipi_sequence(struct drm_i915_private *dev_priv, + const struct bdb_header *bdb) +{ + const struct bdb_mipi_sequence *sequence; + const u8 *seq_data; + u32 seq_size; + u8 *data; + int index = 0; + + /* Only our generic panel driver uses the sequence block. */ + if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID) + return; + + sequence = find_section(bdb, BDB_MIPI_SEQUENCE); + if (!sequence) { + DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n"); return; } - /* check if found sequence is completely within the sequence block - * just being paranoid */ - if (seq_size > block_size) { - DRM_ERROR("Corrupted sequence/size, bailing out\n"); + /* Fail gracefully for forward incompatible sequence block. */ + if (sequence->version >= 4) { + DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n", + sequence->version); return; } - /* skip the panel id(1 byte) and seq size(2 bytes) */ - dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL); - if (!dev_priv->vbt.dsi.data) + DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version); + + seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size); + if (!seq_data) return; - /* - * loop into the sequence data and split into multiple sequneces - * There are only 5 types of sequences as of now - */ - data = dev_priv->vbt.dsi.data; - dev_priv->vbt.dsi.size = seq_size; + data = kmemdup(seq_data, seq_size, GFP_KERNEL); + if (!data) + return; - /* two consecutive 0x00 indicate end of all sequences */ - while (1) { - int seq_id = *data; - if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) { - dev_priv->vbt.dsi.sequence[seq_id] = data; - DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id); - } else { - DRM_ERROR("undefined sequence\n"); + /* Parse the sequences, store pointers to each sequence. */ + for (;;) { + u8 seq_id = *(data + index); + if (seq_id == MIPI_SEQ_END) + break; + + if (seq_id >= MIPI_SEQ_MAX) { + DRM_ERROR("Unknown sequence %u\n", seq_id); goto err; } - /* partial parsing to skip elements */ - data = goto_next_sequence(data, &seq_size); + dev_priv->vbt.dsi.sequence[seq_id] = data + index; - if (data == NULL) { - DRM_ERROR("Sequence elements going beyond block itself. 
Sequence block parsing failed\n"); + if (sequence->version >= 3) + index = goto_next_sequence_v3(data, index, seq_size); + else + index = goto_next_sequence(data, index, seq_size); + if (!index) { + DRM_ERROR("Invalid sequence %u\n", seq_id); goto err; } - - if (*data == 0) - break; /* end of sequence reached */ } - DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n"); + dev_priv->vbt.dsi.data = data; + dev_priv->vbt.dsi.size = seq_size; + dev_priv->vbt.dsi.seq_version = sequence->version; + + DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n"); return; -err: - kfree(dev_priv->vbt.dsi.data); - dev_priv->vbt.dsi.data = NULL; - /* error during parsing so set all pointers to null - * because of partial parsing */ +err: + kfree(data); memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence)); } @@ -1088,7 +1168,12 @@ parse_device_mapping(struct drm_i915_private *dev_priv, DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); return; } - if (bdb->version < 195) { + if (bdb->version < 106) { + expected_size = 22; + } else if (bdb->version < 109) { + expected_size = 27; + } else if (bdb->version < 195) { + BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); expected_size = sizeof(struct old_child_dev_config); } else if (bdb->version == 195) { expected_size = 37; @@ -1101,18 +1186,18 @@ parse_device_mapping(struct drm_i915_private *dev_priv, bdb->version, expected_size); } - /* The legacy sized child device config is the minimum we need. */ - if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) { - DRM_ERROR("Child device config size %u is too small.\n", - p_defs->child_dev_size); - return; - } - /* Flag an error for unexpected size, but continue anyway. */ if (p_defs->child_dev_size != expected_size) DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n", p_defs->child_dev_size, expected_size, bdb->version); + /* The legacy sized child device config is the minimum we need. */ + if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) { + DRM_DEBUG_KMS("Child device config size %u is too small.\n", + p_defs->child_dev_size); + return; + } + /* get the block size of general definitions */ block_size = get_blocksize(p_defs); /* get the number of child device */ @@ -1285,7 +1370,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size) /** * intel_bios_init - find VBT and initialize settings from the BIOS - * @dev: DRM device + * @dev_priv: i915 device instance * * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers * to appropriate values. 
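goto_next_sequence() above (used for sequence block versions 1 and 2; v3 sequences carry an explicit size and are handled by goto_next_sequence_v3()) advances through a panel sequence one element at a time, deriving each element's length from its operation byte — fixed lengths for delay and GPIO elements, an embedded payload length for packet-style elements — and bails out rather than run past the block. The sketch below shows the same skip loop over a deliberately simplified, hypothetical element encoding rather than the real VBT one.

#include <stdint.h>
#include <stddef.h>

enum elem { ELEM_END = 0, ELEM_SEND_PKT, ELEM_DELAY, ELEM_GPIO };

/* Returns the index one past the sequence's end marker, or 0 on a
 * malformed or truncated sequence.  Encoding here is illustrative only:
 * SEND_PKT carries a 2-byte payload length at offset 2, DELAY is 4 bytes,
 * GPIO is 2 bytes. */
static size_t skip_sequence(const uint8_t *data, size_t index, size_t total)
{
	for (index = index + 1; index < total; ) {	/* skip the sequence byte */
		uint8_t op = data[index++];
		size_t len;

		switch (op) {
		case ELEM_END:
			return index;
		case ELEM_SEND_PKT:
			if (index + 4 > total)
				return 0;
			len = 4 + (size_t)(data[index + 2] | (data[index + 3] << 8));
			break;
		case ELEM_DELAY:
			len = 4;
			break;
		case ELEM_GPIO:
			len = 2;
			break;
		default:
			return 0;	/* unknown operation byte */
		}
		if (index + len > total)
			return 0;
		index += len;
	}
	return 0;	/* ran off the block without seeing ELEM_END */
}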
@@ -1337,7 +1422,8 @@ intel_bios_init(struct drm_i915_private *dev_priv) parse_driver_features(dev_priv, bdb); parse_edp(dev_priv, bdb); parse_psr(dev_priv, bdb); - parse_mipi(dev_priv, bdb); + parse_mipi_config(dev_priv, bdb); + parse_mipi_sequence(dev_priv, bdb); parse_ddi_ports(dev_priv, bdb); if (bios) diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 54eac1003a1e..350d4e0f75a4 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -25,25 +25,43 @@ * */ -#ifndef _I830_BIOS_H_ -#define _I830_BIOS_H_ - +#ifndef _INTEL_BIOS_H_ +#define _INTEL_BIOS_H_ + +/** + * struct vbt_header - VBT Header structure + * @signature: VBT signature, always starts with "$VBT" + * @version: Version of this structure + * @header_size: Size of this structure + * @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks) + * @vbt_checksum: Checksum + * @reserved0: Reserved + * @bdb_offset: Offset of &struct bdb_header from beginning of VBT + * @aim_offset: Offsets of add-in data blocks from beginning of VBT + */ struct vbt_header { - u8 signature[20]; /**< Always starts with 'VBT$' */ - u16 version; /**< decimal */ - u16 header_size; /**< in bytes */ - u16 vbt_size; /**< in bytes */ + u8 signature[20]; + u16 version; + u16 header_size; + u16 vbt_size; u8 vbt_checksum; u8 reserved0; - u32 bdb_offset; /**< from beginning of VBT */ - u32 aim_offset[4]; /**< from beginning of VBT */ + u32 bdb_offset; + u32 aim_offset[4]; } __packed; +/** + * struct bdb_header - BDB Header structure + * @signature: BDB signature "BIOS_DATA_BLOCK" + * @version: Version of the data block definitions + * @header_size: Size of this structure + * @bdb_size: Size of BDB (BDB Header and data blocks) + */ struct bdb_header { - u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */ - u16 version; /**< decimal */ - u16 header_size; /**< in bytes */ - u16 bdb_size; /**< in bytes */ + u8 signature[16]; + u16 version; + u16 header_size; + u16 bdb_size; } __packed; /* strictly speaking, this is a "skip" block, but it has interesting info */ @@ -936,21 +954,29 @@ struct bdb_mipi_sequence { /* MIPI Sequnece Block definitions */ enum mipi_seq { - MIPI_SEQ_UNDEFINED = 0, + MIPI_SEQ_END = 0, MIPI_SEQ_ASSERT_RESET, MIPI_SEQ_INIT_OTP, MIPI_SEQ_DISPLAY_ON, MIPI_SEQ_DISPLAY_OFF, MIPI_SEQ_DEASSERT_RESET, + MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */ + MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */ + MIPI_SEQ_TEAR_ON, /* sequence block v2+ */ + MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */ + MIPI_SEQ_POWER_ON, /* sequence block v3+ */ + MIPI_SEQ_POWER_OFF, /* sequence block v3+ */ MIPI_SEQ_MAX }; enum mipi_seq_element { - MIPI_SEQ_ELEM_UNDEFINED = 0, + MIPI_SEQ_ELEM_END = 0, MIPI_SEQ_ELEM_SEND_PKT, MIPI_SEQ_ELEM_DELAY, MIPI_SEQ_ELEM_GPIO, - MIPI_SEQ_ELEM_STATUS, + MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */ + MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */ + MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */ MIPI_SEQ_ELEM_MAX }; @@ -965,4 +991,4 @@ enum mipi_gpio_pin_index { MIPI_GPIO_MAX }; -#endif /* _I830_BIOS_H_ */ +#endif /* _INTEL_BIOS_H_ */ diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 9c89df1af036..ad5dfabc452e 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -216,6 +216,7 @@ intel_crt_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; + int max_dotclk = to_i915(dev)->max_dotclk_freq; int max_clock = 0; if (mode->flags & 
DRM_MODE_FLAG_DBLSCAN) @@ -231,6 +232,9 @@ intel_crt_mode_valid(struct drm_connector *connector, if (mode->clock > max_clock) return MODE_CLOCK_HIGH; + if (mode->clock > max_dotclk) + return MODE_CLOCK_HIGH; + /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */ if (HAS_PCH_LPT(dev) && (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2)) diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 9bb63a85997a..2a7ec3141c8d 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -44,6 +44,8 @@ #define I915_CSR_SKL "i915/skl_dmc_ver1.bin" #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin" +#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares" + MODULE_FIRMWARE(I915_CSR_SKL); MODULE_FIRMWARE(I915_CSR_BXT); @@ -177,7 +179,8 @@ static const struct stepping_info kbl_stepping_info[] = { static const struct stepping_info skl_stepping_info[] = { {'A', '0'}, {'B', '0'}, {'C', '0'}, {'D', '0'}, {'E', '0'}, {'F', '0'}, - {'G', '0'}, {'H', '0'}, {'I', '0'} + {'G', '0'}, {'H', '0'}, {'I', '0'}, + {'J', '0'}, {'K', '0'} }; static const struct stepping_info bxt_stepping_info[] = { @@ -278,10 +281,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, csr->version = css_header->version; - if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) { + if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && + csr->version < SKL_CSR_VERSION_REQUIRED) { DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u," " please upgrade to v%u.%u or later" - " [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n", + " [" FIRMWARE_URL "].\n", CSR_VERSION_MAJOR(csr->version), CSR_VERSION_MINOR(csr->version), CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED), @@ -399,7 +403,10 @@ out: CSR_VERSION_MAJOR(csr->version), CSR_VERSION_MINOR(csr->version)); } else { - DRM_ERROR("Failed to load DMC firmware, disabling rpm\n"); + dev_notice(dev_priv->dev->dev, + "Failed to load DMC firmware" + " [" FIRMWARE_URL "]," + " disabling runtime power management.\n"); } release_firmware(fw); @@ -421,7 +428,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) if (!HAS_CSR(dev_priv)) return; - if (IS_SKYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) csr->fw_path = I915_CSR_SKL; else if (IS_BROXTON(dev_priv)) csr->fw_path = I915_CSR_BXT; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index e6408e5583d7..cdf2e14aa45d 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -133,38 +133,38 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = { { 0x00002016, 0x000000A0, 0x0 }, { 0x00005012, 0x0000009B, 0x0 }, { 0x00007011, 0x00000088, 0x0 }, - { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80009010, 0x000000C0, 0x1 }, { 0x00002016, 0x0000009B, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80007011, 0x000000C0, 0x1 }, { 0x00002016, 0x000000DF, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80005012, 0x000000C0, 0x1 }, }; /* Skylake U */ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { { 0x0000201B, 0x000000A2, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, - { 0x00007011, 0x00000087, 0x0 }, - { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80007011, 0x000000CD, 0x0 }, + { 0x80009010, 0x000000C0, 0x1 }, { 0x0000201B, 0x0000009D, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, 
/* Uses I_boost level 0x1 */ - { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80005012, 0x000000C0, 0x1 }, + { 0x80007011, 0x000000C0, 0x1 }, { 0x00002016, 0x00000088, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80005012, 0x000000C0, 0x1 }, }; /* Skylake Y */ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { { 0x00000018, 0x000000A2, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, - { 0x00007011, 0x00000087, 0x0 }, - { 0x80009010, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */ + { 0x80007011, 0x000000CD, 0x0 }, + { 0x80009010, 0x000000C0, 0x3 }, { 0x00000018, 0x0000009D, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */ - { 0x80007011, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */ + { 0x80005012, 0x000000C0, 0x3 }, + { 0x80007011, 0x000000C0, 0x3 }, { 0x00000018, 0x00000088, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */ + { 0x80005012, 0x000000C0, 0x3 }, }; /* @@ -226,26 +226,26 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { { 0x00000018, 0x000000A1, 0x0 }, { 0x00000018, 0x00000098, 0x0 }, { 0x00004013, 0x00000088, 0x0 }, - { 0x00006012, 0x00000087, 0x0 }, + { 0x80006012, 0x000000CD, 0x1 }, { 0x00000018, 0x000000DF, 0x0 }, - { 0x00003015, 0x00000087, 0x0 }, /* Default */ - { 0x00003015, 0x000000C7, 0x0 }, - { 0x00000018, 0x000000C7, 0x0 }, + { 0x80003015, 0x000000CD, 0x1 }, /* Default */ + { 0x80003015, 0x000000C0, 0x1 }, + { 0x80000018, 0x000000C0, 0x1 }, }; /* Skylake Y */ static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = { { 0x00000018, 0x000000A1, 0x0 }, { 0x00005012, 0x000000DF, 0x0 }, - { 0x00007011, 0x00000084, 0x0 }, + { 0x80007011, 0x000000CB, 0x3 }, { 0x00000018, 0x000000A4, 0x0 }, { 0x00000018, 0x0000009D, 0x0 }, { 0x00004013, 0x00000080, 0x0 }, - { 0x00006013, 0x000000C7, 0x0 }, + { 0x80006013, 0x000000C0, 0x3 }, { 0x00000018, 0x0000008A, 0x0 }, - { 0x00003015, 0x000000C7, 0x0 }, /* Default */ - { 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost level 0x7 */ - { 0x00000018, 0x000000C7, 0x0 }, + { 0x80003015, 0x000000C0, 0x3 }, /* Default */ + { 0x80003015, 0x000000C0, 0x3 }, + { 0x80000018, 0x000000C0, 0x3 }, }; struct bxt_ddi_buf_trans { @@ -301,8 +301,8 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = { { 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */ }; -static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level, - enum port port, int type); +static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, + u32 level, enum port port, int type); static void ddi_get_encoder_port(struct intel_encoder *intel_encoder, struct intel_digital_port **dig_port, @@ -342,81 +342,50 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) return port; } -static bool -intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port) -{ - return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg); -} - -static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev, - int *n_entries) +static const struct ddi_buf_trans * +skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) { - const struct ddi_buf_trans *ddi_translations; - - if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) { - ddi_translations = skl_y_ddi_translations_dp; + if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); - } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) { - ddi_translations = skl_u_ddi_translations_dp; + return 
skl_y_ddi_translations_dp; + } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) { *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); + return skl_u_ddi_translations_dp; } else { - ddi_translations = skl_ddi_translations_dp; *n_entries = ARRAY_SIZE(skl_ddi_translations_dp); + return skl_ddi_translations_dp; } - - return ddi_translations; } -static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev, - int *n_entries) +static const struct ddi_buf_trans * +skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { - struct drm_i915_private *dev_priv = dev->dev_private; - const struct ddi_buf_trans *ddi_translations; - - if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) { - if (dev_priv->edp_low_vswing) { - ddi_translations = skl_y_ddi_translations_edp; + if (dev_priv->edp_low_vswing) { + if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); - } else { - ddi_translations = skl_y_ddi_translations_dp; - *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); - } - } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) { - if (dev_priv->edp_low_vswing) { - ddi_translations = skl_u_ddi_translations_edp; + return skl_y_ddi_translations_edp; + } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) { *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp); + return skl_u_ddi_translations_edp; } else { - ddi_translations = skl_u_ddi_translations_dp; - *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); - } - } else { - if (dev_priv->edp_low_vswing) { - ddi_translations = skl_ddi_translations_edp; *n_entries = ARRAY_SIZE(skl_ddi_translations_edp); - } else { - ddi_translations = skl_ddi_translations_dp; - *n_entries = ARRAY_SIZE(skl_ddi_translations_dp); + return skl_ddi_translations_edp; } } - return ddi_translations; + return skl_get_buf_trans_dp(dev_priv, n_entries); } static const struct ddi_buf_trans * -skl_get_buf_trans_hdmi(struct drm_device *dev, - int *n_entries) +skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) { - const struct ddi_buf_trans *ddi_translations; - - if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) { - ddi_translations = skl_y_ddi_translations_hdmi; + if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); + return skl_y_ddi_translations_hdmi; } else { - ddi_translations = skl_ddi_translations_hdmi; *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); + return skl_ddi_translations_hdmi; } - - return ddi_translations; } /* @@ -426,42 +395,52 @@ skl_get_buf_trans_hdmi(struct drm_device *dev, * in either FDI or DP modes only, as HDMI connections will work with both * of those */ -static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, - bool supports_hdmi) +void intel_prepare_ddi_buffer(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 iboost_bit = 0; int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry, size; - int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; + int hdmi_level; + enum port port; const struct ddi_buf_trans *ddi_translations_fdi; const struct ddi_buf_trans *ddi_translations_dp; const struct ddi_buf_trans *ddi_translations_edp; const struct ddi_buf_trans *ddi_translations_hdmi; const struct ddi_buf_trans *ddi_translations; - if (IS_BROXTON(dev)) { - if (!supports_hdmi) + port = intel_ddi_get_encoder_port(encoder); + hdmi_level = 
dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; + + if (IS_BROXTON(dev_priv)) { + if (encoder->type != INTEL_OUTPUT_HDMI) return; /* Vswing programming for HDMI */ - bxt_ddi_vswing_sequence(dev, hdmi_level, port, + bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port, INTEL_OUTPUT_HDMI); return; - } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + } + + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { ddi_translations_fdi = NULL; ddi_translations_dp = - skl_get_buf_trans_dp(dev, &n_dp_entries); + skl_get_buf_trans_dp(dev_priv, &n_dp_entries); ddi_translations_edp = - skl_get_buf_trans_edp(dev, &n_edp_entries); + skl_get_buf_trans_edp(dev_priv, &n_edp_entries); ddi_translations_hdmi = - skl_get_buf_trans_hdmi(dev, &n_hdmi_entries); + skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); hdmi_default_entry = 8; /* If we're boosting the current, set bit 31 of trans1 */ if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level || dev_priv->vbt.ddi_port_info[port].dp_boost_level) iboost_bit = 1<<31; - } else if (IS_BROADWELL(dev)) { + + if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP && + port != PORT_A && port != PORT_E && + n_edp_entries > 9)) + n_edp_entries = 9; + } else if (IS_BROADWELL(dev_priv)) { ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_dp = bdw_ddi_translations_dp; ddi_translations_edp = bdw_ddi_translations_edp; @@ -470,7 +449,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); hdmi_default_entry = 7; - } else if (IS_HASWELL(dev)) { + } else if (IS_HASWELL(dev_priv)) { ddi_translations_fdi = hsw_ddi_translations_fdi; ddi_translations_dp = hsw_ddi_translations_dp; ddi_translations_edp = hsw_ddi_translations_dp; @@ -490,30 +469,18 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, hdmi_default_entry = 7; } - switch (port) { - case PORT_A: + switch (encoder->type) { + case INTEL_OUTPUT_EDP: ddi_translations = ddi_translations_edp; size = n_edp_entries; break; - case PORT_B: - case PORT_C: + case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_HDMI: ddi_translations = ddi_translations_dp; size = n_dp_entries; break; - case PORT_D: - if (intel_dp_is_edp(dev, PORT_D)) { - ddi_translations = ddi_translations_edp; - size = n_edp_entries; - } else { - ddi_translations = ddi_translations_dp; - size = n_dp_entries; - } - break; - case PORT_E: - if (ddi_translations_fdi) - ddi_translations = ddi_translations_fdi; - else - ddi_translations = ddi_translations_dp; + case INTEL_OUTPUT_ANALOG: + ddi_translations = ddi_translations_fdi; size = n_dp_entries; break; default: @@ -527,7 +494,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, ddi_translations[i].trans2); } - if (!supports_hdmi) + if (encoder->type != INTEL_OUTPUT_HDMI) return; /* Choose a good default if VBT is badly populated */ @@ -542,37 +509,6 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, ddi_translations_hdmi[hdmi_level].trans2); } -/* Program DDI buffers translations for DP. By default, program ports A-D in DP - * mode and port E for FDI. 
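
The translation tables above carry a trans1/trans2 dword pair plus an i_boost value per voltage-swing/pre-emphasis entry. They are consumed by indexing the selected level and programming the DDI buffer translation registers, with bit 31 of the first dword set when an I_boost override from the VBT is in effect (the iboost_bit = 1 << 31 case in intel_prepare_ddi_buffer()). The fragment below is a simplified, standalone model of that consumption under assumed names (struct buf_trans, pick_trans); it returns the two dwords instead of writing registers and is not the driver's code path. The per-entry i_boost value itself is consumed separately, by skl_ddi_set_iboost() further down.

#include <stdint.h>
#include <stddef.h>

struct buf_trans {
	uint32_t trans1;
	uint32_t trans2;
};

struct trans_regs {
	uint32_t lo;	/* first dword of the translation entry */
	uint32_t hi;	/* second dword of the translation entry */
};

static struct trans_regs pick_trans(const struct buf_trans *table, size_t n_entries,
				    size_t level, int boost_override)
{
	struct trans_regs regs;

	if (level >= n_entries)
		level = n_entries - 1;		/* clamp instead of indexing past the table */

	regs.lo = table[level].trans1;
	regs.hi = table[level].trans2;
	if (boost_override)
		regs.lo |= 1u << 31;		/* "boosting the current": bit 31 of trans1 */

	return regs;
}
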
- */ -void intel_prepare_ddi(struct drm_device *dev) -{ - struct intel_encoder *intel_encoder; - bool visited[I915_MAX_PORTS] = { 0, }; - - if (!HAS_DDI(dev)) - return; - - for_each_intel_encoder(dev, intel_encoder) { - struct intel_digital_port *intel_dig_port; - enum port port; - bool supports_hdmi; - - if (intel_encoder->type == INTEL_OUTPUT_DSI) - continue; - - ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port); - if (visited[port]) - continue; - - supports_hdmi = intel_dig_port && - intel_dig_port_supports_hdmi(intel_dig_port); - - intel_prepare_ddi_buffers(dev, port, supports_hdmi); - visited[port] = true; - } -} - static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, enum port port) { @@ -601,8 +537,14 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *encoder; u32 temp, i, rx_ctl_val; + for_each_encoder_on_crtc(dev, crtc, encoder) { + WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG); + intel_prepare_ddi_buffer(encoder); + } + /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the * mode set "sequence for CRT port" document: * - TP1 to TP2 time with the default value @@ -1589,7 +1531,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc, DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | wrpll_params.central_freq; - } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { + } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || + intel_encoder->type == INTEL_OUTPUT_DP_MST) { switch (crtc_state->port_clock / 2) { case 81000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); @@ -1603,8 +1546,10 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc, } cfgcr1 = cfgcr2 = 0; - } else /* eDP */ + } else if (intel_encoder->type == INTEL_OUTPUT_EDP) { return true; + } else + return false; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -2085,10 +2030,9 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) TRANS_CLK_SEL_DISABLED); } -static void skl_ddi_set_iboost(struct drm_device *dev, u32 level, - enum port port, int type) +static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv, + u32 level, enum port port, int type) { - struct drm_i915_private *dev_priv = dev->dev_private; const struct ddi_buf_trans *ddi_translations; uint8_t iboost; uint8_t dp_iboost, hdmi_iboost; @@ -2103,21 +2047,26 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level, if (dp_iboost) { iboost = dp_iboost; } else { - ddi_translations = skl_get_buf_trans_dp(dev, &n_entries); + ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries); iboost = ddi_translations[level].i_boost; } } else if (type == INTEL_OUTPUT_EDP) { if (dp_iboost) { iboost = dp_iboost; } else { - ddi_translations = skl_get_buf_trans_edp(dev, &n_entries); + ddi_translations = skl_get_buf_trans_edp(dev_priv, &n_entries); + + if (WARN_ON(port != PORT_A && + port != PORT_E && n_entries > 9)) + n_entries = 9; + iboost = ddi_translations[level].i_boost; } } else if (type == INTEL_OUTPUT_HDMI) { if (hdmi_iboost) { iboost = hdmi_iboost; } else { - ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries); + ddi_translations = skl_get_buf_trans_hdmi(dev_priv, &n_entries); iboost = ddi_translations[level].i_boost; } } else { @@ -2142,10 +2091,9 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level, I915_WRITE(DISPIO_CR_TX_BMU_CR0, 
reg); } -static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level, - enum port port, int type) +static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, + u32 level, enum port port, int type) { - struct drm_i915_private *dev_priv = dev->dev_private; const struct bxt_ddi_buf_trans *ddi_translations; u32 n_entries, i; uint32_t val; @@ -2260,7 +2208,7 @@ static uint32_t translate_signal_level(int signal_levels) uint32_t ddi_signal_levels(struct intel_dp *intel_dp) { struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct drm_device *dev = dport->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); struct intel_encoder *encoder = &dport->base; uint8_t train_set = intel_dp->train_set[0]; int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | @@ -2270,10 +2218,10 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) level = translate_signal_level(signal_levels); - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) - skl_ddi_set_iboost(dev, level, port, encoder->type); - else if (IS_BROXTON(dev)) - bxt_ddi_vswing_sequence(dev, level, port, encoder->type); + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + skl_ddi_set_iboost(dev_priv, level, port, encoder->type); + else if (IS_BROXTON(dev_priv)) + bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); return DDI_BUF_TRANS_SELECT(level); } @@ -2325,12 +2273,12 @@ void intel_ddi_clk_select(struct intel_encoder *encoder, static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); enum port port = intel_ddi_get_encoder_port(intel_encoder); int type = intel_encoder->type; - int hdmi_level; + + intel_prepare_ddi_buffer(intel_encoder); if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -2348,17 +2296,11 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); intel_dp_start_link_train(intel_dp); - if (port != PORT_A || INTEL_INFO(dev)->gen >= 9) + if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9) intel_dp_stop_link_train(intel_dp); } else if (type == INTEL_OUTPUT_HDMI) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - if (IS_BROXTON(dev)) { - hdmi_level = dev_priv->vbt. 
- ddi_port_info[port].hdmi_level_shift; - bxt_ddi_vswing_sequence(dev, hdmi_level, port, - INTEL_OUTPUT_HDMI); - } intel_hdmi->set_infoframes(encoder, crtc->config->has_hdmi_sink, &crtc->config->base.adjusted_mode); @@ -3282,6 +3224,33 @@ void intel_ddi_init(struct drm_device *dev, enum port port) struct intel_encoder *intel_encoder; struct drm_encoder *encoder; bool init_hdmi, init_dp; + int max_lanes; + + if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) { + switch (port) { + case PORT_A: + max_lanes = 4; + break; + case PORT_E: + max_lanes = 0; + break; + default: + max_lanes = 4; + break; + } + } else { + switch (port) { + case PORT_A: + max_lanes = 2; + break; + case PORT_E: + max_lanes = 2; + break; + default: + max_lanes = 4; + break; + } + } init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || dev_priv->vbt.ddi_port_info[port].supports_hdmi); @@ -3327,9 +3296,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port) if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) { DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n"); intel_dig_port->saved_port_bits |= DDI_A_4_LANES; + max_lanes = 4; } } + intel_dig_port->max_lanes = max_lanes; + intel_encoder->type = INTEL_OUTPUT_UNKNOWN; intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->cloneable = 0; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5feb65725c04..836bbdc239b6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -85,8 +85,6 @@ static const uint32_t intel_cursor_formats[] = { DRM_FORMAT_ARGB8888, }; -static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); - static void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); static void ironlake_pch_clock_get(struct intel_crtc *crtc, @@ -1152,11 +1150,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) } } -static const char *state_string(bool enabled) -{ - return enabled ? 
"on" : "off"; -} - /* Only for pre-ILK configs */ void assert_pll(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) @@ -1168,7 +1161,7 @@ void assert_pll(struct drm_i915_private *dev_priv, cur_state = !!(val & DPLL_VCO_ENABLE); I915_STATE_WARN(cur_state != state, "PLL state assertion failure (expected %s, current %s)\n", - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } /* XXX: the dsi pll is shared between MIPI DSI ports */ @@ -1184,7 +1177,7 @@ static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) cur_state = val & DSI_PLL_VCO_EN; I915_STATE_WARN(cur_state != state, "DSI PLL state assertion failure (expected %s, current %s)\n", - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) @@ -1208,14 +1201,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, bool cur_state; struct intel_dpll_hw_state hw_state; - if (WARN (!pll, - "asserting DPLL %s with no DPLL\n", state_string(state))) + if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state))) return; cur_state = pll->get_hw_state(dev_priv, pll, &hw_state); I915_STATE_WARN(cur_state != state, "%s assertion failure (expected %s, current %s)\n", - pll->name, state_string(state), state_string(cur_state)); + pll->name, onoff(state), onoff(cur_state)); } static void assert_fdi_tx(struct drm_i915_private *dev_priv, @@ -1235,7 +1227,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv, } I915_STATE_WARN(cur_state != state, "FDI TX state assertion failure (expected %s, current %s)\n", - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) @@ -1250,7 +1242,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv, cur_state = !!(val & FDI_RX_ENABLE); I915_STATE_WARN(cur_state != state, "FDI RX state assertion failure (expected %s, current %s)\n", - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) @@ -1282,7 +1274,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, cur_state = !!(val & FDI_RX_PLL_ENABLE); I915_STATE_WARN(cur_state != state, "FDI RX PLL assertion failure (expected %s, current %s)\n", - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } void assert_panel_unlocked(struct drm_i915_private *dev_priv, @@ -1340,7 +1332,7 @@ static void assert_cursor(struct drm_i915_private *dev_priv, I915_STATE_WARN(cur_state != state, "cursor on pipe %c assertion failure (expected %s, current %s)\n", - pipe_name(pipe), state_string(state), state_string(cur_state)); + pipe_name(pipe), onoff(state), onoff(cur_state)); } #define assert_cursor_enabled(d, p) assert_cursor(d, p, true) #define assert_cursor_disabled(d, p) assert_cursor(d, p, false) @@ -1367,7 +1359,7 @@ void assert_pipe(struct drm_i915_private *dev_priv, I915_STATE_WARN(cur_state != state, "pipe %c assertion failure (expected %s, current %s)\n", - pipe_name(pipe), state_string(state), state_string(cur_state)); + pipe_name(pipe), onoff(state), onoff(cur_state)); } static void assert_plane(struct drm_i915_private *dev_priv, @@ -1380,7 +1372,7 @@ static void assert_plane(struct drm_i915_private 
*dev_priv, cur_state = !!(val & DISPLAY_PLANE_ENABLE); I915_STATE_WARN(cur_state != state, "plane %c assertion failure (expected %s, current %s)\n", - plane_name(plane), state_string(state), state_string(cur_state)); + plane_name(plane), onoff(state), onoff(cur_state)); } #define assert_plane_enabled(d, p) assert_plane(d, p, true) @@ -2153,6 +2145,17 @@ static void intel_enable_pipe(struct intel_crtc *crtc) I915_WRITE(reg, val | PIPECONF_ENABLE); POSTING_READ(reg); + + /* + * Until the pipe starts DSL will read as 0, which would cause + * an apparent vblank timestamp jump, which messes up also the + * frame count when it's derived from the timestamps. So let's + * wait for the pipe to start properly before we call + * drm_crtc_vblank_on() + */ + if (dev->max_vblank_count == 0 && + wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50)) + DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe)); } /** @@ -2214,67 +2217,75 @@ static bool need_vtd_wa(struct drm_device *dev) return false; } -unsigned int -intel_tile_height(struct drm_device *dev, uint32_t pixel_format, - uint64_t fb_format_modifier, unsigned int plane) +static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) { - unsigned int tile_height; - uint32_t pixel_bytes; + return IS_GEN2(dev_priv) ? 2048 : 4096; +} - switch (fb_format_modifier) { +static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv, + uint64_t fb_modifier, unsigned int cpp) +{ + switch (fb_modifier) { case DRM_FORMAT_MOD_NONE: - tile_height = 1; - break; + return cpp; case I915_FORMAT_MOD_X_TILED: - tile_height = IS_GEN2(dev) ? 16 : 8; - break; + if (IS_GEN2(dev_priv)) + return 128; + else + return 512; case I915_FORMAT_MOD_Y_TILED: - tile_height = 32; - break; + if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv)) + return 128; + else + return 512; case I915_FORMAT_MOD_Yf_TILED: - pixel_bytes = drm_format_plane_cpp(pixel_format, plane); - switch (pixel_bytes) { - default: + switch (cpp) { case 1: - tile_height = 64; - break; + return 64; case 2: case 4: - tile_height = 32; - break; + return 128; case 8: - tile_height = 16; - break; case 16: - WARN_ONCE(1, - "128-bit pixels are not supported for display!"); - tile_height = 16; - break; + return 256; + default: + MISSING_CASE(cpp); + return cpp; } break; default: - MISSING_CASE(fb_format_modifier); - tile_height = 1; - break; + MISSING_CASE(fb_modifier); + return cpp; } +} - return tile_height; +unsigned int intel_tile_height(const struct drm_i915_private *dev_priv, + uint64_t fb_modifier, unsigned int cpp) +{ + if (fb_modifier == DRM_FORMAT_MOD_NONE) + return 1; + else + return intel_tile_size(dev_priv) / + intel_tile_width(dev_priv, fb_modifier, cpp); } unsigned int intel_fb_align_height(struct drm_device *dev, unsigned int height, - uint32_t pixel_format, uint64_t fb_format_modifier) + uint32_t pixel_format, uint64_t fb_modifier) { - return ALIGN(height, intel_tile_height(dev, pixel_format, - fb_format_modifier, 0)); + unsigned int cpp = drm_format_plane_cpp(pixel_format, 0); + unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp); + + return ALIGN(height, tile_height); } static void intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, const struct drm_plane_state *plane_state) { - struct intel_rotation_info *info = &view->params.rotation_info; - unsigned int tile_height, tile_pitch; + struct drm_i915_private *dev_priv = to_i915(fb->dev); + struct intel_rotation_info *info = &view->params.rotated; + unsigned 
int tile_size, tile_width, tile_height, cpp; *view = i915_ggtt_view_normal; @@ -2292,26 +2303,28 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, info->uv_offset = fb->offsets[1]; info->fb_modifier = fb->modifier[0]; - tile_height = intel_tile_height(fb->dev, fb->pixel_format, - fb->modifier[0], 0); - tile_pitch = PAGE_SIZE / tile_height; - info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch); + tile_size = intel_tile_size(dev_priv); + + cpp = drm_format_plane_cpp(fb->pixel_format, 0); + tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp); + tile_height = tile_size / tile_width; + + info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width); info->height_pages = DIV_ROUND_UP(fb->height, tile_height); - info->size = info->width_pages * info->height_pages * PAGE_SIZE; + info->size = info->width_pages * info->height_pages * tile_size; if (info->pixel_format == DRM_FORMAT_NV12) { - tile_height = intel_tile_height(fb->dev, fb->pixel_format, - fb->modifier[0], 1); - tile_pitch = PAGE_SIZE / tile_height; - info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch); - info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, - tile_height); - info->size_uv = info->width_pages_uv * info->height_pages_uv * - PAGE_SIZE; + cpp = drm_format_plane_cpp(fb->pixel_format, 1); + tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp); + tile_height = tile_size / tile_width; + + info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width); + info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height); + info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size; } } -static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) +static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) { if (INTEL_INFO(dev_priv)->gen >= 9) return 256 * 1024; @@ -2324,6 +2337,25 @@ static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) return 0; } +static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv, + uint64_t fb_modifier) +{ + switch (fb_modifier) { + case DRM_FORMAT_MOD_NONE: + return intel_linear_alignment(dev_priv); + case I915_FORMAT_MOD_X_TILED: + if (INTEL_INFO(dev_priv)->gen >= 9) + return 256 * 1024; + return 0; + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + return 1 * 1024 * 1024; + default: + MISSING_CASE(fb_modifier); + return 0; + } +} + int intel_pin_and_fence_fb_obj(struct drm_plane *plane, struct drm_framebuffer *fb, @@ -2338,29 +2370,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - switch (fb->modifier[0]) { - case DRM_FORMAT_MOD_NONE: - alignment = intel_linear_alignment(dev_priv); - break; - case I915_FORMAT_MOD_X_TILED: - if (INTEL_INFO(dev)->gen >= 9) - alignment = 256 * 1024; - else { - /* pin() will align the object as required by fence */ - alignment = 0; - } - break; - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - if (WARN_ONCE(INTEL_INFO(dev)->gen < 9, - "Y tiling bo slipped through, driver bug!\n")) - return -EINVAL; - alignment = 1 * 1024 * 1024; - break; - default: - MISSING_CASE(fb->modifier[0]); - return -EINVAL; - } + alignment = intel_surf_alignment(dev_priv, fb->modifier[0]); intel_fill_fb_ggtt_view(&view, fb, plane_state); @@ -2438,22 +2448,27 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb, /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel * is assumed to be a power-of-two. 
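
The intel_compute_tile_offset() introduced in this hunk replaces the hard-coded 8-row/512-byte X-tile assumptions of intel_gen4_compute_page_offset() with the generic tile_size/tile_width values, so the same arithmetic serves every tiling layout. The standalone helper below repeats that arithmetic with a worked example in the comments; the numbers assume X-tiling on gen4+ (4096-byte tiles, 512-byte tile rows) as returned by intel_tile_size()/intel_tile_width(), and the function name is illustrative.

#include <stdint.h>

static uint32_t tile_offset(int *x, int *y, unsigned int tile_size,
			    unsigned int tile_width, unsigned int cpp,
			    unsigned int pitch)
{
	unsigned int tile_height = tile_size / tile_width;	/* rows per tile */
	unsigned int tile_rows = *y / tile_height;
	unsigned int tiles;

	*y %= tile_height;
	tiles = *x / (tile_width / cpp);			/* tile column */
	*x %= tile_width / cpp;

	/*
	 * Example: X-tiled, cpp = 4, pitch = 8192 bytes, (x, y) = (200, 100):
	 *   tile_height = 4096 / 512 = 8
	 *   tile_rows   = 100 / 8    = 12, y becomes 4
	 *   tiles       = 200 / 128  = 1,  x becomes 72
	 *   offset      = 12 * 8192 * 8 + 1 * 4096 = 790528 bytes
	 */
	return tile_rows * pitch * tile_height + tiles * tile_size;
}
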
*/ -unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv, - int *x, int *y, - unsigned int tiling_mode, - unsigned int cpp, - unsigned int pitch) -{ - if (tiling_mode != I915_TILING_NONE) { +u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv, + int *x, int *y, + uint64_t fb_modifier, + unsigned int cpp, + unsigned int pitch) +{ + if (fb_modifier != DRM_FORMAT_MOD_NONE) { + unsigned int tile_size, tile_width, tile_height; unsigned int tile_rows, tiles; - tile_rows = *y / 8; - *y %= 8; + tile_size = intel_tile_size(dev_priv); + tile_width = intel_tile_width(dev_priv, fb_modifier, cpp); + tile_height = tile_size / tile_width; + + tile_rows = *y / tile_height; + *y %= tile_height; - tiles = *x / (512/cpp); - *x %= 512/cpp; + tiles = *x / (tile_width/cpp); + *x %= tile_width/cpp; - return tile_rows * pitch * 8 + tiles * 4096; + return tile_rows * pitch * tile_height + tiles * tile_size; } else { unsigned int alignment = intel_linear_alignment(dev_priv) - 1; unsigned int offset; @@ -2598,6 +2613,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct drm_plane_state *plane_state = primary->state; struct drm_crtc_state *crtc_state = intel_crtc->base.state; struct intel_plane *intel_plane = to_intel_plane(primary); + struct intel_plane_state *intel_state = + to_intel_plane_state(plane_state); struct drm_framebuffer *fb; if (!plane_config->fb) @@ -2659,6 +2676,15 @@ valid_fb: plane_state->crtc_w = fb->width; plane_state->crtc_h = fb->height; + intel_state->src.x1 = plane_state->src_x; + intel_state->src.y1 = plane_state->src_y; + intel_state->src.x2 = plane_state->src_x + plane_state->src_w; + intel_state->src.y2 = plane_state->src_y + plane_state->src_h; + intel_state->dst.x1 = plane_state->crtc_x; + intel_state->dst.y1 = plane_state->crtc_y; + intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w; + intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h; + obj = intel_fb_obj(fb); if (obj->tiling_mode != I915_TILING_NONE) dev_priv->preserve_bios_swizzle = true; @@ -2670,37 +2696,22 @@ valid_fb: obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit; } -static void i9xx_update_primary_plane(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y) +static void i9xx_update_primary_plane(struct drm_plane *primary, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = primary->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_plane *primary = crtc->primary; - bool visible = to_intel_plane_state(primary->state)->visible; - struct drm_i915_gem_object *obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_framebuffer *fb = plane_state->base.fb; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); int plane = intel_crtc->plane; - unsigned long linear_offset; + u32 linear_offset; u32 dspcntr; i915_reg_t reg = DSPCNTR(plane); - int pixel_size; - - if (!visible || !fb) { - I915_WRITE(reg, 0); - if (INTEL_INFO(dev)->gen >= 4) - I915_WRITE(DSPSURF(plane), 0); - else - I915_WRITE(DSPADDR(plane), 0); - POSTING_READ(reg); - return; - } - - obj = intel_fb_obj(fb); - if (WARN_ON(obj == NULL)) - return; - - pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + int x = plane_state->src.x1 >> 16; + int y = plane_state->src.y1 >> 16; dspcntr = 
DISPPLANE_GAMMA_ENABLE; @@ -2714,13 +2725,13 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, * which should always be the user's requested size. */ I915_WRITE(DSPSIZE(plane), - ((intel_crtc->config->pipe_src_h - 1) << 16) | - (intel_crtc->config->pipe_src_w - 1)); + ((crtc_state->pipe_src_h - 1) << 16) | + (crtc_state->pipe_src_w - 1)); I915_WRITE(DSPPOS(plane), 0); } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) { I915_WRITE(PRIMSIZE(plane), - ((intel_crtc->config->pipe_src_h - 1) << 16) | - (intel_crtc->config->pipe_src_w - 1)); + ((crtc_state->pipe_src_h - 1) << 16) | + (crtc_state->pipe_src_w - 1)); I915_WRITE(PRIMPOS(plane), 0); I915_WRITE(PRIMCNSTALPHA(plane), 0); } @@ -2758,30 +2769,29 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, if (IS_G4X(dev)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - linear_offset = y * fb->pitches[0] + x * pixel_size; + linear_offset = y * fb->pitches[0] + x * cpp; if (INTEL_INFO(dev)->gen >= 4) { intel_crtc->dspaddr_offset = - intel_gen4_compute_page_offset(dev_priv, - &x, &y, obj->tiling_mode, - pixel_size, - fb->pitches[0]); + intel_compute_tile_offset(dev_priv, &x, &y, + fb->modifier[0], cpp, + fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; } else { intel_crtc->dspaddr_offset = linear_offset; } - if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) { + if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { dspcntr |= DISPPLANE_ROTATE_180; - x += (intel_crtc->config->pipe_src_w - 1); - y += (intel_crtc->config->pipe_src_h - 1); + x += (crtc_state->pipe_src_w - 1); + y += (crtc_state->pipe_src_h - 1); /* Finding the last pixel of the last line of the display data and adding to linear_offset*/ linear_offset += - (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] + - (intel_crtc->config->pipe_src_w - 1) * pixel_size; + (crtc_state->pipe_src_h - 1) * fb->pitches[0] + + (crtc_state->pipe_src_w - 1) * cpp; } intel_crtc->adjusted_x = x; @@ -2800,37 +2810,40 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, POSTING_READ(reg); } -static void ironlake_update_primary_plane(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y) +static void i9xx_disable_primary_plane(struct drm_plane *primary, + struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_plane *primary = crtc->primary; - bool visible = to_intel_plane_state(primary->state)->visible; - struct drm_i915_gem_object *obj; int plane = intel_crtc->plane; - unsigned long linear_offset; - u32 dspcntr; - i915_reg_t reg = DSPCNTR(plane); - int pixel_size; - if (!visible || !fb) { - I915_WRITE(reg, 0); + I915_WRITE(DSPCNTR(plane), 0); + if (INTEL_INFO(dev_priv)->gen >= 4) I915_WRITE(DSPSURF(plane), 0); - POSTING_READ(reg); - return; - } - - obj = intel_fb_obj(fb); - if (WARN_ON(obj == NULL)) - return; + else + I915_WRITE(DSPADDR(plane), 0); + POSTING_READ(DSPCNTR(plane)); +} - pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); +static void ironlake_update_primary_plane(struct drm_plane *primary, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_device *dev = primary->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_framebuffer *fb = plane_state->base.fb; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + int plane = intel_crtc->plane; + u32 
linear_offset; + u32 dspcntr; + i915_reg_t reg = DSPCNTR(plane); + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + int x = plane_state->src.x1 >> 16; + int y = plane_state->src.y1 >> 16; dspcntr = DISPPLANE_GAMMA_ENABLE; - dspcntr |= DISPLAY_PLANE_ENABLE; if (IS_HASWELL(dev) || IS_BROADWELL(dev)) @@ -2865,25 +2878,24 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - linear_offset = y * fb->pitches[0] + x * pixel_size; + linear_offset = y * fb->pitches[0] + x * cpp; intel_crtc->dspaddr_offset = - intel_gen4_compute_page_offset(dev_priv, - &x, &y, obj->tiling_mode, - pixel_size, - fb->pitches[0]); + intel_compute_tile_offset(dev_priv, &x, &y, + fb->modifier[0], cpp, + fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; - if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) { + if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { dspcntr |= DISPPLANE_ROTATE_180; if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { - x += (intel_crtc->config->pipe_src_w - 1); - y += (intel_crtc->config->pipe_src_h - 1); + x += (crtc_state->pipe_src_w - 1); + y += (crtc_state->pipe_src_h - 1); /* Finding the last pixel of the last line of the display data and adding to linear_offset*/ linear_offset += - (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] + - (intel_crtc->config->pipe_src_w - 1) * pixel_size; + (crtc_state->pipe_src_h - 1) * fb->pitches[0] + + (crtc_state->pipe_src_w - 1) * cpp; } } @@ -2904,37 +2916,15 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, POSTING_READ(reg); } -u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier, - uint32_t pixel_format) +u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, + uint64_t fb_modifier, uint32_t pixel_format) { - u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8; - - /* - * The stride is either expressed as a multiple of 64 bytes - * chunks for linear buffers or in number of tiles for tiled - * buffers. - */ - switch (fb_modifier) { - case DRM_FORMAT_MOD_NONE: - return 64; - case I915_FORMAT_MOD_X_TILED: - if (INTEL_INFO(dev)->gen == 2) - return 128; - return 512; - case I915_FORMAT_MOD_Y_TILED: - /* No need to check for old gens and Y tiling since this is - * about the display engine and those will be blocked before - * we get here. 
- */ - return 128; - case I915_FORMAT_MOD_Yf_TILED: - if (bits_per_pixel == 8) - return 64; - else - return 128; - default: - MISSING_CASE(fb_modifier); + if (fb_modifier == DRM_FORMAT_MOD_NONE) { return 64; + } else { + int cpp = drm_format_plane_cpp(pixel_format, 0); + + return intel_tile_width(dev_priv, fb_modifier, cpp); } } @@ -2957,7 +2947,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane, offset = vma->node.start; if (plane == 1) { - offset += vma->ggtt_view.params.rotation_info.uv_start_page * + offset += vma->ggtt_view.params.rotated.uv_start_page * PAGE_SIZE; } @@ -3074,36 +3064,30 @@ u32 skl_plane_ctl_rotation(unsigned int rotation) return 0; } -static void skylake_update_primary_plane(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y) +static void skylake_update_primary_plane(struct drm_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_plane *plane = crtc->primary; - bool visible = to_intel_plane_state(plane->state)->visible; - struct drm_i915_gem_object *obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_framebuffer *fb = plane_state->base.fb; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_crtc->pipe; u32 plane_ctl, stride_div, stride; u32 tile_height, plane_offset, plane_size; - unsigned int rotation; + unsigned int rotation = plane_state->base.rotation; int x_offset, y_offset; u32 surf_addr; - struct intel_crtc_state *crtc_state = intel_crtc->config; - struct intel_plane_state *plane_state; - int src_x = 0, src_y = 0, src_w = 0, src_h = 0; - int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0; - int scaler_id = -1; - - plane_state = to_intel_plane_state(plane->state); - - if (!visible || !fb) { - I915_WRITE(PLANE_CTL(pipe, 0), 0); - I915_WRITE(PLANE_SURF(pipe, 0), 0); - POSTING_READ(PLANE_CTL(pipe, 0)); - return; - } + int scaler_id = plane_state->scaler_id; + int src_x = plane_state->src.x1 >> 16; + int src_y = plane_state->src.y1 >> 16; + int src_w = drm_rect_width(&plane_state->src) >> 16; + int src_h = drm_rect_height(&plane_state->src) >> 16; + int dst_x = plane_state->dst.x1; + int dst_y = plane_state->dst.y1; + int dst_w = drm_rect_width(&plane_state->dst); + int dst_h = drm_rect_height(&plane_state->dst); plane_ctl = PLANE_CTL_ENABLE | PLANE_CTL_PIPE_GAMMA_ENABLE | @@ -3112,41 +3096,27 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, plane_ctl |= skl_plane_ctl_format(fb->pixel_format); plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]); plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; - - rotation = plane->state->rotation; plane_ctl |= skl_plane_ctl_rotation(rotation); - obj = intel_fb_obj(fb); - stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], + stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0], fb->pixel_format); surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0); WARN_ON(drm_rect_width(&plane_state->src) == 0); - scaler_id = plane_state->scaler_id; - src_x = plane_state->src.x1 >> 16; - src_y = plane_state->src.y1 >> 16; - src_w = drm_rect_width(&plane_state->src) >> 16; - src_h = drm_rect_height(&plane_state->src) >> 16; - dst_x = plane_state->dst.x1; - dst_y = plane_state->dst.y1; - dst_w = drm_rect_width(&plane_state->dst); - dst_h = 
drm_rect_height(&plane_state->dst); - - WARN_ON(x != src_x || y != src_y); - if (intel_rotation_90_or_270(rotation)) { + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + /* stride = Surface height in tiles */ - tile_height = intel_tile_height(dev, fb->pixel_format, - fb->modifier[0], 0); + tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp); stride = DIV_ROUND_UP(fb->height, tile_height); - x_offset = stride * tile_height - y - src_h; - y_offset = x; + x_offset = stride * tile_height - src_y - src_h; + y_offset = src_x; plane_size = (src_w - 1) << 16 | (src_h - 1); } else { stride = fb->pitches[0] / stride_div; - x_offset = x; - y_offset = y; + x_offset = src_x; + y_offset = src_y; plane_size = (src_h - 1) << 16 | (src_w - 1); } plane_offset = y_offset << 16 | x_offset; @@ -3179,20 +3149,27 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, POSTING_READ(PLANE_SURF(pipe, 0)); } -/* Assume fb object is pinned & idle & fenced and just update base pointers */ -static int -intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y, enum mode_set_atomic state) +static void skylake_disable_primary_plane(struct drm_plane *primary, + struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = to_intel_crtc(crtc)->pipe; - if (dev_priv->fbc.deactivate) - dev_priv->fbc.deactivate(dev_priv); + I915_WRITE(PLANE_CTL(pipe, 0), 0); + I915_WRITE(PLANE_SURF(pipe, 0), 0); + POSTING_READ(PLANE_SURF(pipe, 0)); +} - dev_priv->display.update_primary_plane(crtc, fb, x, y); +/* Assume fb object is pinned & idle & fenced and just update base pointers */ +static int +intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + /* Support for kgdboc is disabled, this needs a major rework. 
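
The rewritten skylake_update_primary_plane() above takes the 90/270-degree case directly from the plane state: the hardware scans the rotated GGTT view, so the stride becomes the surface height in tiles and the offset/size fields swap their x/y roles. The helper below restates just that arithmetic under assumed names (compute_skl_plane_params, struct skl_plane_params); it computes the three register values and writes nothing.

#include <stdint.h>

struct skl_plane_params {
	uint32_t stride;	/* pixels for linear scanout, tiles when rotated */
	uint32_t offset;	/* y_offset << 16 | x_offset */
	uint32_t size;		/* (height - 1) << 16 | (width - 1) */
};

static struct skl_plane_params compute_skl_plane_params(int rotated_90_or_270,
							unsigned int fb_height,
							unsigned int pitch_bytes,
							unsigned int stride_div,
							unsigned int tile_height,
							int src_x, int src_y,
							int src_w, int src_h)
{
	struct skl_plane_params p;

	if (rotated_90_or_270) {
		/* stride = surface height in tiles, rounded up */
		p.stride = (fb_height + tile_height - 1) / tile_height;
		p.offset = (uint32_t)src_x << 16 |
			   (p.stride * tile_height - src_y - src_h);
		p.size   = (uint32_t)(src_w - 1) << 16 | (src_h - 1);
	} else {
		p.stride = pitch_bytes / stride_div;
		p.offset = (uint32_t)src_y << 16 | src_x;
		p.size   = (uint32_t)(src_h - 1) << 16 | (src_w - 1);
	}
	return p;
}
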
*/ + DRM_ERROR("legacy panic handler not supported any more.\n"); - return 0; + return -ENODEV; } static void intel_complete_page_flips(struct drm_device *dev) @@ -3219,8 +3196,10 @@ static void intel_update_primary_planes(struct drm_device *dev) drm_modeset_lock_crtc(crtc, &plane->base); plane_state = to_intel_plane_state(plane->base.state); - if (crtc->state->active && plane_state->base.fb) - plane->commit_plane(&plane->base, plane_state); + if (plane_state->visible) + plane->update_plane(&plane->base, + to_intel_crtc_state(crtc->state), + plane_state); drm_modeset_unlock_crtc(crtc); } @@ -4452,7 +4431,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state) intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX); return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, - &state->scaler_state.scaler_id, DRM_ROTATE_0, + &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0), state->pipe_src_w, state->pipe_src_h, adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); } @@ -4817,7 +4796,7 @@ static void intel_post_plane_update(struct intel_crtc *crtc) intel_update_watermarks(&crtc->base); if (atomic->update_fbc) - intel_fbc_update(crtc); + intel_fbc_post_update(crtc); if (atomic->post_enable_primary) intel_post_enable_primary(&crtc->base); @@ -4825,26 +4804,39 @@ static void intel_post_plane_update(struct intel_crtc *crtc) memset(atomic, 0, sizeof(*atomic)); } -static void intel_pre_plane_update(struct intel_crtc *crtc) +static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc_atomic_commit *atomic = &crtc->atomic; struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->base.state); + struct drm_atomic_state *old_state = old_crtc_state->base.state; + struct drm_plane *primary = crtc->base.primary; + struct drm_plane_state *old_pri_state = + drm_atomic_get_existing_plane_state(old_state, primary); + bool modeset = needs_modeset(&pipe_config->base); - if (atomic->disable_fbc) - intel_fbc_deactivate(crtc); + if (atomic->update_fbc) + intel_fbc_pre_update(crtc); - if (crtc->atomic.disable_ips) - hsw_disable_ips(crtc); + if (old_pri_state) { + struct intel_plane_state *primary_state = + to_intel_plane_state(primary->state); + struct intel_plane_state *old_primary_state = + to_intel_plane_state(old_pri_state); - if (atomic->pre_disable_primary) - intel_pre_disable_primary(&crtc->base); + if (old_primary_state->visible && + (modeset || !primary_state->visible)) + intel_pre_disable_primary(&crtc->base); + } if (pipe_config->disable_cxsr) { crtc->wm.cxsr_allowed = false; - intel_set_memory_cxsr(dev_priv, false); + + if (old_crtc_state->base.active) + intel_set_memory_cxsr(dev_priv, false); } if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed) @@ -4945,8 +4937,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config->has_pch_encoder) intel_wait_for_vblank(dev, pipe); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); - - intel_fbc_enable(intel_crtc); } /* IPS only exists on ULT machines and is tied to pipe A. 
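
The reworked intel_pre_plane_update() above derives the primary plane's pre-disable work from the old and new plane states instead of precomputed atomic flags: it is needed exactly when the plane was visible before the update and is either being turned off or the CRTC is undergoing a full modeset. As a standalone predicate (the name is illustrative):

#include <stdbool.h>

static bool primary_needs_pre_disable(bool was_visible, bool will_be_visible,
				      bool full_modeset)
{
	return was_visible && (full_modeset || !will_be_visible);
}
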
*/ @@ -5059,8 +5049,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_wait_for_vblank(dev, hsw_workaround_pipe); intel_wait_for_vblank(dev, hsw_workaround_pipe); } - - intel_fbc_enable(intel_crtc); } static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) @@ -5141,8 +5129,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) } intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); - - intel_fbc_disable_crtc(intel_crtc); } static void haswell_crtc_disable(struct drm_crtc *crtc) @@ -5193,8 +5179,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, true); } - - intel_fbc_disable_crtc(intel_crtc); } static void i9xx_pfit_enable(struct intel_crtc *crtc) @@ -5370,6 +5354,7 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv, static void modeset_update_crtc_power_domains(struct drm_atomic_state *state) { + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_device *dev = state->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long put_domains[I915_MAX_PIPES] = {}; @@ -5383,13 +5368,9 @@ static void modeset_update_crtc_power_domains(struct drm_atomic_state *state) modeset_get_crtc_power_domains(crtc); } - if (dev_priv->display.modeset_commit_cdclk) { - unsigned int cdclk = to_intel_atomic_state(state)->cdclk; - - if (cdclk != dev_priv->cdclk_freq && - !WARN_ON(!state->allow_modeset)) - dev_priv->display.modeset_commit_cdclk(state); - } + if (dev_priv->display.modeset_commit_cdclk && + intel_state->dev_cdclk != dev_priv->cdclk_freq) + dev_priv->display.modeset_commit_cdclk(state); for (i = 0; i < I915_MAX_PIPES; i++) if (put_domains[i]) @@ -6063,22 +6044,31 @@ static int broxton_calc_cdclk(struct drm_i915_private *dev_priv, static int intel_mode_max_pixclk(struct drm_device *dev, struct drm_atomic_state *state) { - struct intel_crtc *intel_crtc; - struct intel_crtc_state *crtc_state; - int max_pixclk = 0; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + unsigned max_pixclk = 0, i; + enum pipe pipe; - for_each_intel_crtc(dev, intel_crtc) { - crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); + memcpy(intel_state->min_pixclk, dev_priv->min_pixclk, + sizeof(intel_state->min_pixclk)); - if (!crtc_state->base.enable) - continue; + for_each_crtc_in_state(state, crtc, crtc_state, i) { + int pixclk = 0; + + if (crtc_state->enable) + pixclk = crtc_state->adjusted_mode.crtc_clock; - max_pixclk = max(max_pixclk, - crtc_state->base.adjusted_mode.crtc_clock); + intel_state->min_pixclk[i] = pixclk; } + if (!intel_state->active_crtcs) + return 0; + + for_each_pipe(dev_priv, pipe) + max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk); + return max_pixclk; } @@ -6087,13 +6077,18 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) struct drm_device *dev = state->dev; struct drm_i915_private *dev_priv = dev->dev_private; int max_pixclk = intel_mode_max_pixclk(dev, state); + struct intel_atomic_state *intel_state = + to_intel_atomic_state(state); if (max_pixclk < 0) return max_pixclk; - to_intel_atomic_state(state)->cdclk = + intel_state->cdclk = intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); + if (!intel_state->active_crtcs) + intel_state->dev_cdclk = 
valleyview_calc_cdclk(dev_priv, 0); + return 0; } @@ -6102,13 +6097,18 @@ static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state) struct drm_device *dev = state->dev; struct drm_i915_private *dev_priv = dev->dev_private; int max_pixclk = intel_mode_max_pixclk(dev, state); + struct intel_atomic_state *intel_state = + to_intel_atomic_state(state); if (max_pixclk < 0) return max_pixclk; - to_intel_atomic_state(state)->cdclk = + intel_state->cdclk = intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, max_pixclk); + if (!intel_state->active_crtcs) + intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0); + return 0; } @@ -6151,8 +6151,10 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; - unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_state); + unsigned req_cdclk = old_intel_state->dev_cdclk; /* * FIXME: We can end up here with all power domains off, yet @@ -6287,8 +6289,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) for_each_encoder_on_crtc(dev, crtc, encoder) encoder->enable(encoder); - - intel_fbc_enable(intel_crtc); } static void i9xx_pfit_disable(struct intel_crtc *crtc) @@ -6351,8 +6351,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) if (!IS_GEN2(dev)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); - - intel_fbc_disable_crtc(intel_crtc); } static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) @@ -6376,6 +6374,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) dev_priv->display.crtc_disable(crtc); intel_crtc->active = false; + intel_fbc_disable(intel_crtc); intel_update_watermarks(crtc); intel_disable_shared_dpll(intel_crtc); @@ -6383,6 +6382,9 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) for_each_power_domain(domain, domains) intel_display_power_put(dev_priv, domain); intel_crtc->enabled_power_domains = 0; + + dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe); + dev_priv->min_pixclk[intel_crtc->pipe] = 0; } /* @@ -7593,26 +7595,34 @@ static void chv_prepare_pll(struct intel_crtc *crtc, * in cases where we need the PLL enabled even when @pipe is not going to * be enabled. 
*/ -void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, - const struct dpll *dpll) +int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, + const struct dpll *dpll) { struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); - struct intel_crtc_state pipe_config = { - .base.crtc = &crtc->base, - .pixel_multiplier = 1, - .dpll = *dpll, - }; + struct intel_crtc_state *pipe_config; + + pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); + if (!pipe_config) + return -ENOMEM; + + pipe_config->base.crtc = &crtc->base; + pipe_config->pixel_multiplier = 1; + pipe_config->dpll = *dpll; if (IS_CHERRYVIEW(dev)) { - chv_compute_dpll(crtc, &pipe_config); - chv_prepare_pll(crtc, &pipe_config); - chv_enable_pll(crtc, &pipe_config); + chv_compute_dpll(crtc, pipe_config); + chv_prepare_pll(crtc, pipe_config); + chv_enable_pll(crtc, pipe_config); } else { - vlv_compute_dpll(crtc, &pipe_config); - vlv_prepare_pll(crtc, &pipe_config); - vlv_enable_pll(crtc, &pipe_config); + vlv_compute_dpll(crtc, pipe_config); + vlv_prepare_pll(crtc, pipe_config); + vlv_enable_pll(crtc, pipe_config); } + + kfree(pipe_config); + + return 0; } /** @@ -9246,7 +9256,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb->width = ((val >> 0) & 0x1fff) + 1; val = I915_READ(PLANE_STRIDE(pipe, 0)); - stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0], + stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0], fb->pixel_format); fb->pitches[0] = (val & 0x3ff) * stride_mult; @@ -9662,14 +9672,14 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv) val |= PCH_LP_PARTITION_LEVEL_DISABLE; I915_WRITE(SOUTH_DSPCLK_GATE_D, val); } - - intel_prepare_ddi(dev); } static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; - unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk; + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_state); + unsigned int req_cdclk = old_intel_state->dev_cdclk; broxton_set_cdclk(dev, req_cdclk); } @@ -9677,29 +9687,41 @@ static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state) /* compute the max rate for new configuration */ static int ilk_max_pixel_rate(struct drm_atomic_state *state) { - struct intel_crtc *intel_crtc; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = state->dev->dev_private; + struct drm_crtc *crtc; + struct drm_crtc_state *cstate; struct intel_crtc_state *crtc_state; - int max_pixel_rate = 0; + unsigned max_pixel_rate = 0, i; + enum pipe pipe; - for_each_intel_crtc(state->dev, intel_crtc) { - int pixel_rate; + memcpy(intel_state->min_pixclk, dev_priv->min_pixclk, + sizeof(intel_state->min_pixclk)); - crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); + for_each_crtc_in_state(state, crtc, cstate, i) { + int pixel_rate; - if (!crtc_state->base.enable) + crtc_state = to_intel_crtc_state(cstate); + if (!crtc_state->base.enable) { + intel_state->min_pixclk[i] = 0; continue; + } pixel_rate = ilk_pipe_pixel_rate(crtc_state); /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled) + if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); - max_pixel_rate = max(max_pixel_rate, pixel_rate); + intel_state->min_pixclk[i] = pixel_rate; } + if (!intel_state->active_crtcs) + return 0; + 
+ for_each_pipe(dev_priv, pipe) + max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate); + return max_pixel_rate; } @@ -9783,6 +9805,7 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->dev); + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); int max_pixclk = ilk_max_pixel_rate(state); int cdclk; @@ -9805,7 +9828,9 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) return -EINVAL; } - to_intel_atomic_state(state)->cdclk = cdclk; + intel_state->cdclk = intel_state->dev_cdclk = cdclk; + if (!intel_state->active_crtcs) + intel_state->dev_cdclk = 337500; return 0; } @@ -9813,7 +9838,9 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; - unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk; + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_state); + unsigned req_cdclk = old_intel_state->dev_cdclk; broadwell_set_cdclk(dev, req_cdclk); } @@ -9821,8 +9848,13 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state) static int haswell_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { - if (!intel_ddi_pll_select(crtc, crtc_state)) - return -EINVAL; + struct intel_encoder *intel_encoder = + intel_ddi_get_crtc_new_encoder(crtc_state); + + if (intel_encoder->type != INTEL_OUTPUT_DSI) { + if (!intel_ddi_pll_select(crtc, crtc_state)) + return -EINVAL; + } crtc->lowfreq_avail = false; @@ -10026,16 +10058,17 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, return true; } -static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on) +static void i845_update_cursor(struct drm_crtc *crtc, u32 base, + const struct intel_plane_state *plane_state) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t cntl = 0, size = 0; - if (on) { - unsigned int width = intel_crtc->base.cursor->state->crtc_w; - unsigned int height = intel_crtc->base.cursor->state->crtc_h; + if (plane_state && plane_state->visible) { + unsigned int width = plane_state->base.crtc_w; + unsigned int height = plane_state->base.crtc_h; unsigned int stride = roundup_pow_of_two(width) * 4; switch (stride) { @@ -10088,7 +10121,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on) } } -static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on) +static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, + const struct intel_plane_state *plane_state) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -10096,9 +10130,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on) int pipe = intel_crtc->pipe; uint32_t cntl = 0; - if (on) { + if (plane_state && plane_state->visible) { cntl = MCURSOR_GAMMA_ENABLE; - switch (intel_crtc->base.cursor->state->crtc_w) { + switch (plane_state->base.crtc_w) { case 64: cntl |= CURSOR_MODE_64_ARGB_AX; break; @@ -10109,17 +10143,17 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on) cntl |= CURSOR_MODE_256_ARGB_AX; break; default: - MISSING_CASE(intel_crtc->base.cursor->state->crtc_w); + MISSING_CASE(plane_state->base.crtc_w); return; } 
cntl |= pipe << 28; /* Connect to correct pipe */ if (HAS_DDI(dev)) cntl |= CURSOR_PIPE_CSC_ENABLE; - } - if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) - cntl |= CURSOR_ROTATE_180; + if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) + cntl |= CURSOR_ROTATE_180; + } if (intel_crtc->cursor_cntl != cntl) { I915_WRITE(CURCNTR(pipe), cntl); @@ -10136,56 +10170,45 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on) /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ static void intel_crtc_update_cursor(struct drm_crtc *crtc, - bool on) + const struct intel_plane_state *plane_state) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - struct drm_plane_state *cursor_state = crtc->cursor->state; - int x = cursor_state->crtc_x; - int y = cursor_state->crtc_y; - u32 base = 0, pos = 0; - - base = intel_crtc->cursor_addr; + u32 base = intel_crtc->cursor_addr; + u32 pos = 0; - if (x >= intel_crtc->config->pipe_src_w) - on = false; + if (plane_state) { + int x = plane_state->base.crtc_x; + int y = plane_state->base.crtc_y; - if (y >= intel_crtc->config->pipe_src_h) - on = false; - - if (x < 0) { - if (x + cursor_state->crtc_w <= 0) - on = false; - - pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; - x = -x; - } - pos |= x << CURSOR_X_SHIFT; + if (x < 0) { + pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; + x = -x; + } + pos |= x << CURSOR_X_SHIFT; - if (y < 0) { - if (y + cursor_state->crtc_h <= 0) - on = false; + if (y < 0) { + pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; + y = -y; + } + pos |= y << CURSOR_Y_SHIFT; - pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; - y = -y; + /* ILK+ do this automagically */ + if (HAS_GMCH_DISPLAY(dev) && + plane_state->base.rotation == BIT(DRM_ROTATE_180)) { + base += (plane_state->base.crtc_h * + plane_state->base.crtc_w - 1) * 4; + } } - pos |= y << CURSOR_Y_SHIFT; I915_WRITE(CURPOS(pipe), pos); - /* ILK+ do this automagically */ - if (HAS_GMCH_DISPLAY(dev) && - crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) { - base += (cursor_state->crtc_h * - cursor_state->crtc_w - 1) * 4; - } - if (IS_845G(dev) || IS_I865G(dev)) - i845_update_cursor(crtc, base, on); + i845_update_cursor(crtc, base, plane_state); else - i9xx_update_cursor(crtc, base, on); + i9xx_update_cursor(crtc, base, plane_state); } static bool cursor_size_ok(struct drm_device *dev, @@ -10498,7 +10521,6 @@ retry: } connector_state->crtc = crtc; - connector_state->best_encoder = &intel_encoder->base; crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); if (IS_ERR(crtc_state)) { @@ -10594,7 +10616,6 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, if (IS_ERR(crtc_state)) goto fail; - connector_state->best_encoder = NULL; connector_state->crtc = NULL; crtc_state->base.enable = crtc_state->base.active = false; @@ -10778,7 +10799,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; struct drm_display_mode *mode; - struct intel_crtc_state pipe_config; + struct intel_crtc_state *pipe_config; int htot = I915_READ(HTOTAL(cpu_transcoder)); int hsync = I915_READ(HSYNC(cpu_transcoder)); int vtot = I915_READ(VTOTAL(cpu_transcoder)); @@ -10789,6 +10810,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, if (!mode) return NULL; + pipe_config = 
kzalloc(sizeof(*pipe_config), GFP_KERNEL); + if (!pipe_config) { + kfree(mode); + return NULL; + } + /* * Construct a pipe_config sufficient for getting the clock info * back out of crtc_clock_get. @@ -10796,14 +10823,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need * to use a real value here instead. */ - pipe_config.cpu_transcoder = (enum transcoder) pipe; - pipe_config.pixel_multiplier = 1; - pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe)); - pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe)); - pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe)); - i9xx_crtc_clock_get(intel_crtc, &pipe_config); - - mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier; + pipe_config->cpu_transcoder = (enum transcoder) pipe; + pipe_config->pixel_multiplier = 1; + pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe)); + pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe)); + pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe)); + i9xx_crtc_clock_get(intel_crtc, pipe_config); + + mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; mode->hdisplay = (htot & 0xffff) + 1; mode->htotal = ((htot & 0xffff0000) >> 16) + 1; mode->hsync_start = (hsync & 0xffff) + 1; @@ -10815,6 +10842,8 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, drm_mode_set_name(mode); + kfree(pipe_config); + return mode; } @@ -10885,6 +10914,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) mutex_unlock(&dev->struct_mutex); intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit); + intel_fbc_post_update(crtc); drm_framebuffer_unreference(work->old_fb); BUG_ON(atomic_read(&crtc->unpin_work_count) == 0); @@ -11319,13 +11349,12 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, */ if (intel_rotation_90_or_270(rotation)) { /* stride = Surface height in tiles */ - tile_height = intel_tile_height(dev, fb->pixel_format, - fb->modifier[0], 0); + tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0); stride = DIV_ROUND_UP(fb->height, tile_height); } else { stride = fb->pitches[0] / - intel_fb_stride_alignment(dev, fb->modifier[0], - fb->pixel_format); + intel_fb_stride_alignment(dev_priv, fb->modifier[0], + fb->pixel_format); } /* @@ -11601,6 +11630,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, crtc->primary->fb = fb; update_state_fb(crtc->primary); + intel_fbc_pre_update(intel_crtc); work->pending_flip_obj = obj; @@ -11660,9 +11690,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, obj->last_write_req); } else { if (!request) { - ret = i915_gem_request_alloc(ring, ring->default_context, &request); - if (ret) + request = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(request)) { + ret = PTR_ERR(request); goto cleanup_unpin; + } } ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, @@ -11683,7 +11715,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, to_intel_plane(primary)->frontbuffer_bit); mutex_unlock(&dev->struct_mutex); - intel_fbc_deactivate(intel_crtc); intel_frontbuffer_flip_prepare(dev, to_intel_plane(primary)->frontbuffer_bit); @@ -11694,7 +11725,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, cleanup_unpin: intel_unpin_fb_obj(fb, crtc->primary->state); cleanup_pending: - if (request) + if (!IS_ERR_OR_NULL(request)) i915_gem_request_cancel(request); atomic_dec(&intel_crtc->unpin_work_count); mutex_unlock(&dev->struct_mutex); @@ -11805,7 +11836,6 @@ int 
intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_plane *plane = plane_state->plane; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane_state *old_plane_state = to_intel_plane_state(plane->state); int idx = intel_crtc->base.base.id, ret; @@ -11831,8 +11861,13 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, if (!was_crtc_enabled && WARN_ON(was_visible)) was_visible = false; - if (!is_crtc_enabled && WARN_ON(visible)) - visible = false; + /* + * Visibility is calculated as if the crtc was on, but + * after scaler setup everything depends on it being off + * when the crtc isn't active. + */ + if (!is_crtc_enabled) + to_intel_plane_state(plane_state)->visible = visible = false; if (!was_visible && !visible) return 0; @@ -11866,39 +11901,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, switch (plane->type) { case DRM_PLANE_TYPE_PRIMARY: - intel_crtc->atomic.pre_disable_primary = turn_off; intel_crtc->atomic.post_enable_primary = turn_on; - - if (turn_off) { - /* - * FIXME: Actually if we will still have any other - * plane enabled on the pipe we could let IPS enabled - * still, but for now lets consider that when we make - * primary invisible by setting DSPCNTR to 0 on - * update_primary_plane function IPS needs to be - * disable. - */ - intel_crtc->atomic.disable_ips = true; - - intel_crtc->atomic.disable_fbc = true; - } - - /* - * FBC does not work on some platforms for rotated - * planes, so disable it when rotation is not 0 and - * update it when rotation is set back to 0. - * - * FIXME: This is redundant with the fbc update done in - * the primary plane enable function except that that - * one is done too late. We eventually need to unify - * this. 
- */ - - if (visible && - INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && - dev_priv->fbc.crtc == intel_crtc && - plane_state->rotation != BIT(DRM_ROTATE_0)) - intel_crtc->atomic.disable_fbc = true; + intel_crtc->atomic.update_fbc = true; /* * BDW signals flip done immediately if the plane @@ -11908,7 +11912,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, if (turn_on && IS_BROADWELL(dev)) intel_crtc->atomic.wait_vblank = true; - intel_crtc->atomic.update_fbc |= visible || mode_changed; break; case DRM_PLANE_TYPE_CURSOR: break; @@ -12529,19 +12532,22 @@ intel_compare_m_n(unsigned int m, unsigned int n, BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); - if (m > m2) { - while (m > m2) { + if (n > n2) { + while (n > n2) { m2 <<= 1; n2 <<= 1; } - } else if (m < m2) { - while (m < m2) { + } else if (n < n2) { + while (n < n2) { m <<= 1; n <<= 1; } } - return m == m2 && n == n2; + if (n != n2) + return false; + + return intel_fuzzy_clock_check(m, m2); } static bool @@ -13216,15 +13222,27 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state) static int intel_modeset_checks(struct drm_atomic_state *state) { - struct drm_device *dev = state->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = state->dev->dev_private; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int ret = 0, i; if (!check_digital_port_conflicts(state)) { DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); return -EINVAL; } + intel_state->modeset = true; + intel_state->active_crtcs = dev_priv->active_crtcs; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + if (crtc_state->active) + intel_state->active_crtcs |= 1 << i; + else + intel_state->active_crtcs &= ~(1 << i); + } + /* * See if the config requires any additional preparation, e.g. * to adjust global state with pipes off. We need to do this @@ -13233,22 +13251,19 @@ static int intel_modeset_checks(struct drm_atomic_state *state) * adjusted_mode bits in the crtc directly. 
*/ if (dev_priv->display.modeset_calc_cdclk) { - unsigned int cdclk; - ret = dev_priv->display.modeset_calc_cdclk(state); - cdclk = to_intel_atomic_state(state)->cdclk; - if (!ret && cdclk != dev_priv->cdclk_freq) + if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq) ret = intel_modeset_all_pipes(state); if (ret < 0) return ret; } else - to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq; + to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq; intel_modeset_clear_plls(state); - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) return haswell_mode_set_planes_workaround(state); return 0; @@ -13301,6 +13316,7 @@ static void calc_watermark_data(struct drm_atomic_state *state) static int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; @@ -13343,7 +13359,7 @@ static int intel_atomic_check(struct drm_device *dev, return ret; if (i915.fastboot && - intel_pipe_config_compare(state->dev, + intel_pipe_config_compare(dev, to_intel_crtc_state(crtc->state), pipe_config, true)) { crtc_state->mode_changed = false; @@ -13369,12 +13385,13 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) return ret; } else - intel_state->cdclk = to_i915(state->dev)->cdclk_freq; + intel_state->cdclk = dev_priv->cdclk_freq; - ret = drm_atomic_helper_check_planes(state->dev, state); + ret = drm_atomic_helper_check_planes(dev, state); if (ret) return ret; + intel_fbc_choose_crtc(dev_priv, state); calc_watermark_data(state); return 0; @@ -13466,12 +13483,12 @@ static int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool async) { + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc_state *crtc_state; struct drm_crtc *crtc; - int ret = 0; - int i; - bool any_ms = false; + int ret = 0, i; + bool hw_check = intel_state->modeset; ret = intel_atomic_prepare_commit(dev, state, async); if (ret) { @@ -13482,19 +13499,26 @@ static int intel_atomic_commit(struct drm_device *dev, drm_atomic_helper_swap_state(dev, state); dev_priv->wm.config = to_intel_atomic_state(state)->wm_config; + if (intel_state->modeset) { + memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, + sizeof(intel_state->min_pixclk)); + dev_priv->active_crtcs = intel_state->active_crtcs; + dev_priv->atomic_cdclk_freq = intel_state->cdclk; + } + for_each_crtc_in_state(state, crtc, crtc_state, i) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); if (!needs_modeset(crtc->state)) continue; - any_ms = true; - intel_pre_plane_update(intel_crtc); + intel_pre_plane_update(to_intel_crtc_state(crtc_state)); if (crtc_state->active) { intel_crtc_disable_planes(crtc, crtc_state->plane_mask); dev_priv->display.crtc_disable(crtc); intel_crtc->active = false; + intel_fbc_disable(intel_crtc); intel_disable_shared_dpll(intel_crtc); /* @@ -13513,7 +13537,7 @@ static int intel_atomic_commit(struct drm_device *dev, * update the the output configuration. 
*/ intel_modeset_update_crtc_state(state); - if (any_ms) { + if (intel_state->modeset) { intel_shared_dpll_commit(state); drm_atomic_helper_update_legacy_modeset_state(state->dev, state); @@ -13540,11 +13564,14 @@ static int intel_atomic_commit(struct drm_device *dev, put_domains = modeset_get_crtc_power_domains(crtc); /* make sure intel_modeset_check_state runs */ - any_ms = true; + hw_check = true; } if (!modeset) - intel_pre_plane_update(intel_crtc); + intel_pre_plane_update(to_intel_crtc_state(crtc_state)); + + if (crtc->state->active && intel_crtc->atomic.update_fbc) + intel_fbc_enable(intel_crtc); if (crtc->state->active && (crtc->state->planes_changed || update_pipe)) @@ -13567,11 +13594,24 @@ static int intel_atomic_commit(struct drm_device *dev, drm_atomic_helper_cleanup_planes(dev, state); mutex_unlock(&dev->struct_mutex); - if (any_ms) + if (hw_check) intel_modeset_check_state(dev, state); drm_atomic_state_free(state); + /* As one of the primary mmio accessors, KMS has a high likelihood + * of triggering bugs in unclaimed access. After we finish + * modesetting, see if an error has been flagged, and if so + * enable debugging for the next modeset - and hope we catch + * the culprit. + * + * XXX note that we assume display power is on at this point. + * This might hold true now but we need to add pm helper to check + * unclaimed only when the hardware is on, as atomic commits + * can happen also when the device is completely off. + */ + intel_uncore_arm_unclaimed_mmio_detection(dev_priv); + return 0; } @@ -13860,7 +13900,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state struct drm_i915_private *dev_priv; int crtc_clock, cdclk; - if (!intel_crtc || !crtc_state) + if (!intel_crtc || !crtc_state->base.enable) return DRM_PLANE_HELPER_NO_SCALING; dev = intel_crtc->base.dev; @@ -13909,32 +13949,6 @@ intel_check_primary_plane(struct drm_plane *plane, &state->visible); } -static void -intel_commit_primary_plane(struct drm_plane *plane, - struct intel_plane_state *state) -{ - struct drm_crtc *crtc = state->base.crtc; - struct drm_framebuffer *fb = state->base.fb; - struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - crtc = crtc ? 
crtc : plane->crtc; - - dev_priv->display.update_primary_plane(crtc, fb, - state->src.x1 >> 16, - state->src.y1 >> 16); -} - -static void -intel_disable_primary_plane(struct drm_plane *plane, - struct drm_crtc *crtc) -{ - struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - dev_priv->display.update_primary_plane(crtc, NULL, 0, 0); -} - static void intel_begin_crtc_commit(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { @@ -14019,20 +14033,33 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, primary->plane = pipe; primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); primary->check_plane = intel_check_primary_plane; - primary->commit_plane = intel_commit_primary_plane; - primary->disable_plane = intel_disable_primary_plane; if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) primary->plane = !pipe; if (INTEL_INFO(dev)->gen >= 9) { intel_primary_formats = skl_primary_formats; num_formats = ARRAY_SIZE(skl_primary_formats); + + primary->update_plane = skylake_update_primary_plane; + primary->disable_plane = skylake_disable_primary_plane; + } else if (HAS_PCH_SPLIT(dev)) { + intel_primary_formats = i965_primary_formats; + num_formats = ARRAY_SIZE(i965_primary_formats); + + primary->update_plane = ironlake_update_primary_plane; + primary->disable_plane = i9xx_disable_primary_plane; } else if (INTEL_INFO(dev)->gen >= 4) { intel_primary_formats = i965_primary_formats; num_formats = ARRAY_SIZE(i965_primary_formats); + + primary->update_plane = i9xx_update_primary_plane; + primary->disable_plane = i9xx_disable_primary_plane; } else { intel_primary_formats = i8xx_primary_formats; num_formats = ARRAY_SIZE(i8xx_primary_formats); + + primary->update_plane = i9xx_update_primary_plane; + primary->disable_plane = i9xx_disable_primary_plane; } drm_universal_plane_init(dev, &primary->base, 0, @@ -14131,22 +14158,23 @@ static void intel_disable_cursor_plane(struct drm_plane *plane, struct drm_crtc *crtc) { - intel_crtc_update_cursor(crtc, false); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + intel_crtc->cursor_addr = 0; + intel_crtc_update_cursor(crtc, NULL); } static void -intel_commit_cursor_plane(struct drm_plane *plane, - struct intel_plane_state *state) +intel_update_cursor_plane(struct drm_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *state) { - struct drm_crtc *crtc = state->base.crtc; + struct drm_crtc *crtc = crtc_state->base.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = plane->dev; - struct intel_crtc *intel_crtc; struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb); uint32_t addr; - crtc = crtc ? 
crtc : plane->crtc; - intel_crtc = to_intel_crtc(crtc); - if (!obj) addr = 0; else if (!INTEL_INFO(dev)->cursor_needs_physical) @@ -14155,9 +14183,7 @@ intel_commit_cursor_plane(struct drm_plane *plane, addr = obj->phys_handle->busaddr; intel_crtc->cursor_addr = addr; - - if (crtc->state->active) - intel_crtc_update_cursor(crtc, state->visible); + intel_crtc_update_cursor(crtc, state); } static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, @@ -14183,7 +14209,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, cursor->plane = pipe; cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe); cursor->check_plane = intel_check_cursor_plane; - cursor->commit_plane = intel_commit_cursor_plane; + cursor->update_plane = intel_update_cursor_plane; cursor->disable_plane = intel_disable_cursor_plane; drm_universal_plane_init(dev, &cursor->base, 0, @@ -14630,10 +14656,12 @@ u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier, u32 gen = INTEL_INFO(dev)->gen; if (gen >= 9) { + int cpp = drm_format_plane_cpp(pixel_format, 0); + /* "The stride in bytes must not exceed the of the size of 8K * pixels and 32K bytes." */ - return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768); + return min(8192 * cpp, 32768); } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { return 32*1024; } else if (gen >= 4) { @@ -14657,6 +14685,7 @@ static int intel_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj) { + struct drm_i915_private *dev_priv = to_i915(dev); unsigned int aligned_height; int ret; u32 pitch_limit, stride_alignment; @@ -14698,7 +14727,8 @@ static int intel_framebuffer_init(struct drm_device *dev, return -EINVAL; } - stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0], + stride_alignment = intel_fb_stride_alignment(dev_priv, + mode_cmd->modifier[0], mode_cmd->pixel_format); if (mode_cmd->pitches[0] & (stride_alignment - 1)) { DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n", @@ -14790,7 +14820,6 @@ static int intel_framebuffer_init(struct drm_device *dev, drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); intel_fb->obj = obj; - intel_fb->obj->framebuffer_references++; ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { @@ -14798,6 +14827,8 @@ static int intel_framebuffer_init(struct drm_device *dev, return ret; } + intel_fb->obj->framebuffer_references++; + return 0; } @@ -14861,8 +14892,6 @@ static void intel_init_display(struct drm_device *dev) haswell_crtc_compute_clock; dev_priv->display.crtc_enable = haswell_crtc_enable; dev_priv->display.crtc_disable = haswell_crtc_disable; - dev_priv->display.update_primary_plane = - skylake_update_primary_plane; } else if (HAS_DDI(dev)) { dev_priv->display.get_pipe_config = haswell_get_pipe_config; dev_priv->display.get_initial_plane_config = @@ -14871,8 +14900,6 @@ static void intel_init_display(struct drm_device *dev) haswell_crtc_compute_clock; dev_priv->display.crtc_enable = haswell_crtc_enable; dev_priv->display.crtc_disable = haswell_crtc_disable; - dev_priv->display.update_primary_plane = - ironlake_update_primary_plane; } else if (HAS_PCH_SPLIT(dev)) { dev_priv->display.get_pipe_config = ironlake_get_pipe_config; dev_priv->display.get_initial_plane_config = @@ -14881,8 +14908,6 @@ static void intel_init_display(struct drm_device *dev) ironlake_crtc_compute_clock; dev_priv->display.crtc_enable = ironlake_crtc_enable; dev_priv->display.crtc_disable = 
ironlake_crtc_disable; - dev_priv->display.update_primary_plane = - ironlake_update_primary_plane; } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; dev_priv->display.get_initial_plane_config = @@ -14890,8 +14915,6 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; dev_priv->display.crtc_enable = valleyview_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; - dev_priv->display.update_primary_plane = - i9xx_update_primary_plane; } else { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; dev_priv->display.get_initial_plane_config = @@ -14899,8 +14922,6 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; dev_priv->display.crtc_enable = i9xx_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; - dev_priv->display.update_primary_plane = - i9xx_update_primary_plane; } /* Returns the core display clock speed */ @@ -15206,12 +15227,89 @@ static void i915_disable_vga(struct drm_device *dev) void intel_modeset_init_hw(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; + intel_update_cdclk(dev); - intel_prepare_ddi(dev); + + dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; + intel_init_clock_gating(dev); intel_enable_gt_powersave(dev); } +/* + * Calculate what we think the watermarks should be for the state we've read + * out of the hardware and then immediately program those watermarks so that + * we ensure the hardware settings match our internal state. + * + * We can calculate what we think WM's should be by creating a duplicate of the + * current state (which was constructed during hardware readout) and running it + * through the atomic check code to calculate new watermark values in the + * state object. + */ +static void sanitize_watermarks(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_atomic_state *state; + struct drm_crtc *crtc; + struct drm_crtc_state *cstate; + struct drm_modeset_acquire_ctx ctx; + int ret; + int i; + + /* Only supported on platforms that use atomic watermark design */ + if (!dev_priv->display.program_watermarks) + return; + + /* + * We need to hold connection_mutex before calling duplicate_state so + * that the connector loop is protected. + */ + drm_modeset_acquire_init(&ctx, 0); +retry: + ret = drm_modeset_lock_all_ctx(dev, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry; + } else if (WARN_ON(ret)) { + goto fail; + } + + state = drm_atomic_helper_duplicate_state(dev, &ctx); + if (WARN_ON(IS_ERR(state))) + goto fail; + + ret = intel_atomic_check(dev, state); + if (ret) { + /* + * If we fail here, it means that the hardware appears to be + * programmed in a way that shouldn't be possible, given our + * understanding of watermark requirements. This might mean a + * mistake in the hardware readout code or a mistake in the + * watermark calculations for a given platform. Raise a WARN + * so that this is noticeable. + * + * If this actually happens, we'll have to just leave the + * BIOS-programmed watermarks untouched and hope for the best. 
+ */ + WARN(true, "Could not determine valid watermarks for inherited state\n"); + goto fail; + } + + /* Write calculated watermark values back */ + to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config; + for_each_crtc_in_state(state, crtc, cstate, i) { + struct intel_crtc_state *cs = to_intel_crtc_state(cstate); + + dev_priv->display.program_watermarks(cs); + } + + drm_atomic_state_free(state); +fail: + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); +} + void intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -15332,6 +15430,13 @@ void intel_modeset_init(struct drm_device *dev) */ intel_find_initial_plane_obj(crtc, &plane_config); } + + /* + * Make sure hardware watermarks really match the state we read out. + * Note that we need to do this after reconstructing the BIOS fb's + * since the watermark calculation done here will use pstate->fb. + */ + sanitize_watermarks(dev); } static void intel_enable_pipe_a(struct drm_device *dev) @@ -15462,6 +15567,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) crtc->base.state->active = crtc->active; crtc->base.enabled = crtc->active; crtc->base.state->connector_mask = 0; + crtc->base.state->encoder_mask = 0; /* Because we only establish the connector -> encoder -> * crtc links if something is active, this means the @@ -15604,16 +15710,40 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_connector *connector; int i; + dev_priv->active_crtcs = 0; + for_each_intel_crtc(dev, crtc) { - __drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state); - memset(crtc->config, 0, sizeof(*crtc->config)); - crtc->config->base.crtc = &crtc->base; + struct intel_crtc_state *crtc_state = crtc->config; + int pixclk = 0; - crtc->active = dev_priv->display.get_pipe_config(crtc, - crtc->config); + __drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base); + memset(crtc_state, 0, sizeof(*crtc_state)); + crtc_state->base.crtc = &crtc->base; - crtc->base.state->active = crtc->active; - crtc->base.enabled = crtc->active; + crtc_state->base.active = crtc_state->base.enable = + dev_priv->display.get_pipe_config(crtc, crtc_state); + + crtc->base.enabled = crtc_state->base.enable; + crtc->active = crtc_state->base.active; + + if (crtc_state->base.active) { + dev_priv->active_crtcs |= 1 << crtc->pipe; + + if (IS_BROADWELL(dev_priv)) { + pixclk = ilk_pipe_pixel_rate(crtc_state); + + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ + if (crtc_state->ips_enabled) + pixclk = DIV_ROUND_UP(pixclk * 100, 95); + } else if (IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv) || + IS_BROXTON(dev_priv)) + pixclk = crtc_state->base.adjusted_mode.crtc_clock; + else + WARN_ON(dev_priv->display.modeset_calc_cdclk); + } + + dev_priv->min_pixclk[crtc->pipe] = pixclk; readout_plane_state(crtc); @@ -15677,6 +15807,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) */ encoder->base.crtc->state->connector_mask |= 1 << drm_connector_index(&connector->base); + encoder->base.crtc->state->encoder_mask |= + 1 << drm_encoder_index(&encoder->base); } } else { @@ -15778,6 +15910,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev) modeset_put_power_domains(dev_priv, put_domains); } intel_display_set_init_power(dev_priv, false); + + intel_fbc_init_pipe_state(dev_priv); } void intel_display_resume(struct drm_device *dev) @@ -15907,7 +16041,7 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_unregister_dsm_handler(); - 
intel_fbc_disable(dev_priv); + intel_fbc_global_disable(dev_priv); /* flush any delayed tasks or pending work */ flush_scheduled_work(); @@ -16117,7 +16251,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, for_each_pipe(dev_priv, i) { err_printf(m, "Pipe [%d]:\n", i); err_printf(m, " Power: %s\n", - error->pipe[i].power_domain_on ? "on" : "off"); + onoff(error->pipe[i].power_domain_on)); err_printf(m, " SRC: %08x\n", error->pipe[i].source); err_printf(m, " STAT: %08x\n", error->pipe[i].stat); @@ -16145,7 +16279,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, err_printf(m, "CPU transcoder: %c\n", transcoder_name(error->transcoder[i].cpu_transcoder)); err_printf(m, " Power: %s\n", - error->transcoder[i].power_domain_on ? "on" : "off"); + onoff(error->transcoder[i].power_domain_on)); err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); @@ -16155,24 +16289,3 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); } } - -void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file) -{ - struct intel_crtc *crtc; - - for_each_intel_crtc(dev, crtc) { - struct intel_unpin_work *work; - - spin_lock_irq(&dev->event_lock); - - work = crtc->unpin_work; - - if (work && work->event && - work->event->base.file_priv == file) { - kfree(work->event); - work->event = NULL; - } - - spin_unlock_irq(&dev->event_lock); - } -} diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 796e3d313cb9..23599c36503f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -157,14 +157,9 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; u8 source_max, sink_max; - source_max = 4; - if (HAS_DDI(dev) && intel_dig_port->port == PORT_A && - (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0) - source_max = 2; - + source_max = intel_dig_port->max_lanes; sink_max = drm_dp_max_lane_count(intel_dp->dpcd); return min(source_max, sink_max); @@ -208,6 +203,7 @@ intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; int target_clock = mode->clock; int max_rate, mode_rate, max_lanes, max_link_clock; + int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; if (is_edp(intel_dp) && fixed_mode) { if (mode->hdisplay > fixed_mode->hdisplay) @@ -225,7 +221,7 @@ intel_dp_mode_valid(struct drm_connector *connector, max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); mode_rate = intel_dp_link_required(target_clock, 18); - if (mode_rate > max_rate) + if (mode_rate > max_rate || target_clock > max_dotclk) return MODE_CLOCK_HIGH; if (mode->clock < 10000) @@ -340,8 +336,12 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) release_cl_override = IS_CHERRYVIEW(dev) && !chv_phy_powergate_ch(dev_priv, phy, ch, true); - vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ? - &chv_dpll[0].dpll : &vlv_dpll[0].dpll); + if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ? 
+ &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) { + DRM_ERROR("Failed to force on pll for pipe %c!\n", + pipe_name(pipe)); + return; + } } /* @@ -980,7 +980,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (WARN_ON(txsize > 20)) return -E2BIG; - memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); + if (msg->buffer) + memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); + else + WARN_ON(msg->size); ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); if (ret > 0) { @@ -1189,7 +1192,6 @@ intel_dp_aux_fini(struct intel_dp *intel_dp) static int intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->port; int ret; @@ -1200,7 +1202,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) if (!intel_dp->aux.name) return -ENOMEM; - intel_dp->aux.dev = dev->dev; + intel_dp->aux.dev = connector->base.kdev; intel_dp->aux.transfer = intel_dp_aux_transfer; DRM_DEBUG_KMS("registering %s bus for %s\n", @@ -1215,16 +1217,6 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) return ret; } - ret = sysfs_create_link(&connector->base.kdev->kobj, - &intel_dp->aux.ddc.dev.kobj, - intel_dp->aux.ddc.dev.kobj.name); - if (ret < 0) { - DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", - intel_dp->aux.name, ret); - intel_dp_aux_fini(intel_dp); - return ret; - } - return 0; } @@ -1233,9 +1225,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector) { struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); - if (!intel_connector->mst_port) - sysfs_remove_link(&intel_connector->base.kdev->kobj, - intel_dp->aux.ddc.dev.kobj.name); + intel_dp_aux_fini(intel_dp); intel_connector_unregister(intel_connector); } @@ -1812,12 +1802,21 @@ static void wait_panel_off(struct intel_dp *intel_dp) static void wait_panel_power_cycle(struct intel_dp *intel_dp) { + ktime_t panel_power_on_time; + s64 panel_power_off_duration; + DRM_DEBUG_KMS("Wait for panel power cycle\n"); + /* take the difference of currrent time and panel power off time + * and then make panel wait for t11_t12 if needed. */ + panel_power_on_time = ktime_get_boottime(); + panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time); + /* When we disable the VDD override bit last we have to do the manual * wait. */ - wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle, - intel_dp->panel_power_cycle_delay); + if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay) + wait_remaining_ms_from_jiffies(jiffies, + intel_dp->panel_power_cycle_delay - panel_power_off_duration); wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); } @@ -1969,7 +1968,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); if ((pp & POWER_TARGET_ON) == 0) - intel_dp->last_power_cycle = jiffies; + intel_dp->panel_power_off_time = ktime_get_boottime(); power_domain = intel_display_port_aux_power_domain(intel_encoder); intel_display_power_put(dev_priv, power_domain); @@ -2118,7 +2117,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); - intel_dp->last_power_cycle = jiffies; + intel_dp->panel_power_off_time = ktime_get_boottime(); wait_panel_off(intel_dp); /* We got a reference when we enabled the VDD. 
*/ @@ -2243,11 +2242,6 @@ static void intel_edp_backlight_power(struct intel_connector *connector, _intel_edp_backlight_off(intel_dp); } -static const char *state_string(bool enabled) -{ - return enabled ? "on" : "off"; -} - static void assert_dp_port(struct intel_dp *intel_dp, bool state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); @@ -2257,7 +2251,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state) I915_STATE_WARN(cur_state != state, "DP port %c state assertion failure (expected %s, current %s)\n", port_name(dig_port->port), - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } #define assert_dp_port_disabled(d) assert_dp_port((d), false) @@ -2267,7 +2261,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) I915_STATE_WARN(cur_state != state, "eDP PLL state assertion failure (expected %s, current %s)\n", - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) @@ -4014,7 +4008,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp) } while (--attempts && count); if (attempts == 0) { - DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n"); + DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n"); ret = -ETIMEDOUT; } @@ -4874,7 +4868,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_dp *intel_dp = &intel_dig_port->dp; - intel_dp_aux_fini(intel_dp); intel_dp_mst_encoder_cleanup(intel_dig_port); if (is_edp(intel_dp)) { cancel_delayed_work_sync(&intel_dp->panel_vdd_work); @@ -5122,7 +5115,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) { - intel_dp->last_power_cycle = jiffies; + intel_dp->panel_power_off_time = ktime_get_boottime(); intel_dp->last_power_on = jiffies; intel_dp->last_backlight_off = jiffies; } @@ -5839,6 +5832,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, enum port port = intel_dig_port->port; int type, ret; + if (WARN(intel_dig_port->max_lanes < 1, + "Not enough lanes (%d) for DP on port %c\n", + intel_dig_port->max_lanes, port_name(port))) + return false; + intel_dp->pps_pipe = INVALID_PIPE; /* intel_dp vfuncs */ @@ -6037,6 +6035,7 @@ intel_dp_init(struct drm_device *dev, intel_dig_port->port = port; dev_priv->dig_port_map[port] = intel_encoder; intel_dig_port->dp.output_reg = output_reg; + intel_dig_port->max_lanes = 4; intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; if (IS_CHERRYVIEW(dev)) { diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 88887938e0bf..0b8eefc2acc5 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c @@ -215,27 +215,46 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) } } -static void -intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) +/* + * Pick training pattern for channel equalization. Training Pattern 3 for HBR2 + * or 1.2 devices that support it, Training Pattern 2 otherwise. 
+ */ +static u32 intel_dp_training_pattern(struct intel_dp *intel_dp) { - bool channel_eq = false; - int tries, cr_tries; - uint32_t training_pattern = DP_TRAINING_PATTERN_2; + u32 training_pattern = DP_TRAINING_PATTERN_2; + bool source_tps3, sink_tps3; /* - * Training Pattern 3 for HBR2 or 1.2 devices that support it. - * * Intel platforms that support HBR2 also support TPS3. TPS3 support is - * also mandatory for downstream devices that support HBR2. + * also mandatory for downstream devices that support HBR2. However, not + * all sinks follow the spec. * * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is - * supported but still not enabled. + * supported in source but still not enabled. */ - if (intel_dp_source_supports_hbr2(intel_dp) && - drm_dp_tps3_supported(intel_dp->dpcd)) + source_tps3 = intel_dp_source_supports_hbr2(intel_dp); + sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd); + + if (source_tps3 && sink_tps3) { training_pattern = DP_TRAINING_PATTERN_3; - else if (intel_dp->link_rate == 540000) - DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n"); + } else if (intel_dp->link_rate == 540000) { + if (!source_tps3) + DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n"); + if (!sink_tps3) + DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n"); + } + + return training_pattern; +} + +static void +intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) +{ + bool channel_eq = false; + int tries, cr_tries; + u32 training_pattern; + + training_pattern = intel_dp_training_pattern(intel_dp); /* channel equalization */ if (!intel_dp_set_link_train(intel_dp, diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index fa0dabf578dc..a2bd698fe2f7 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -184,7 +184,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) intel_mst->port = found->port; if (intel_dp->active_mst_links == 0) { - intel_ddi_clk_select(encoder, intel_crtc->config); + intel_prepare_ddi_buffer(&intel_dig_port->base); + + intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config); intel_dp_set_link_params(intel_dp, intel_crtc->config); @@ -369,6 +371,8 @@ static enum drm_mode_status intel_dp_mst_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + /* TODO - validate mode against available PBN for link */ if (mode->clock < 10000) return MODE_CLOCK_LOW; @@ -376,6 +380,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, if (mode->flags & DRM_MODE_FLAG_DBLCLK) return MODE_H_ILLEGAL; + if (mode->clock > max_dotclk) + return MODE_CLOCK_HIGH; + return MODE_OK; } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ea5415851c6e..3cae3768ea37 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -246,7 +246,18 @@ struct intel_atomic_state { struct drm_atomic_state base; unsigned int cdclk; - bool dpll_set; + + /* + * Calculated device cdclk, can be different from cdclk + * only when all crtc's are DPMS off. 
+ */ + unsigned int dev_cdclk; + + bool dpll_set, modeset; + + unsigned int active_crtcs; + unsigned int min_pixclk[I915_MAX_PIPES]; + struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; struct intel_wm_config wm_config; }; @@ -481,6 +492,8 @@ struct intel_crtc_state { bool ips_enabled; + bool enable_fbc; + bool double_wide; bool dp_encoder_is_mst; @@ -531,16 +544,15 @@ struct intel_mmio_flip { */ struct intel_crtc_atomic_commit { /* Sleepable operations to perform before commit */ - bool disable_fbc; - bool disable_ips; - bool pre_disable_primary; /* Sleepable operations to perform after commit */ unsigned fb_bits; bool wait_vblank; - bool update_fbc; bool post_enable_primary; unsigned update_sprite_watermarks; + + /* Sleepable operations to perform before and after commit */ + bool update_fbc; }; struct intel_crtc { @@ -564,7 +576,7 @@ struct intel_crtc { /* Display surface base address adjustement for pageflips. Note that on * gen4+ this only adjusts up to a tile, offsets within a tile are * handled in the hw itself (with the TILEOFF register). */ - unsigned long dspaddr_offset; + u32 dspaddr_offset; int adjusted_x; int adjusted_y; @@ -647,23 +659,17 @@ struct intel_plane { /* * NOTE: Do not place new plane state fields here (e.g., when adding * new plane properties). New runtime state should now be placed in - * the intel_plane_state structure and accessed via drm_plane->state. + * the intel_plane_state structure and accessed via plane_state. */ void (*update_plane)(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t x, uint32_t y, - uint32_t src_w, uint32_t src_h); + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); void (*disable_plane)(struct drm_plane *plane, struct drm_crtc *crtc); int (*check_plane)(struct drm_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state); - void (*commit_plane)(struct drm_plane *plane, - struct intel_plane_state *state); }; struct intel_watermark_params { @@ -765,9 +771,9 @@ struct intel_dp { int backlight_off_delay; struct delayed_work panel_vdd_work; bool want_panel_vdd; - unsigned long last_power_cycle; unsigned long last_power_on; unsigned long last_backlight_off; + ktime_t panel_power_off_time; struct notifier_block edp_notifier; @@ -817,6 +823,7 @@ struct intel_digital_port { struct intel_hdmi hdmi; enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); bool release_cl2_override; + uint8_t max_lanes; /* for communication with audio component; protected by av_mutex */ const struct drm_connector *audio_connector; }; @@ -996,7 +1003,7 @@ void intel_crt_init(struct drm_device *dev); /* intel_ddi.c */ void intel_ddi_clk_select(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config); -void intel_prepare_ddi(struct drm_device *dev); +void intel_prepare_ddi_buffer(struct intel_encoder *encoder); void hsw_fdi_link_train(struct drm_crtc *crtc); void intel_ddi_init(struct drm_device *dev, enum port port); enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder); @@ -1041,8 +1048,8 @@ unsigned int intel_fb_align_height(struct drm_device *dev, uint64_t fb_format_modifier); void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire, enum fb_op_origin origin); -u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier, - uint32_t pixel_format); +u32 intel_fb_stride_alignment(const struct drm_i915_private 
*dev_priv, + uint64_t fb_modifier, uint32_t pixel_format); /* intel_audio.c */ void intel_init_audio(struct drm_device *dev); @@ -1126,9 +1133,8 @@ int intel_plane_atomic_set_property(struct drm_plane *plane, int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state); -unsigned int -intel_tile_height(struct drm_device *dev, uint32_t pixel_format, - uint64_t fb_format_modifier, unsigned int plane); +unsigned int intel_tile_height(const struct drm_i915_private *dev_priv, + uint64_t fb_modifier, unsigned int cpp); static inline bool intel_rotation_90_or_270(unsigned int rotation) @@ -1149,8 +1155,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, struct intel_crtc_state *state); -void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, - const struct dpll *dpll); +int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, + const struct dpll *dpll); void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe); /* modesetting asserts */ @@ -1167,11 +1173,11 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) -unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv, - int *x, int *y, - unsigned int tiling_mode, - unsigned int bpp, - unsigned int pitch); +u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv, + int *x, int *y, + uint64_t fb_modifier, + unsigned int cpp, + unsigned int pitch); void intel_prepare_reset(struct drm_device *dev); void intel_finish_reset(struct drm_device *dev); void hsw_enable_pc8(struct drm_i915_private *dev_priv); @@ -1207,7 +1213,6 @@ enum intel_display_power_domain intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder); void intel_mode_from_pipe_config(struct drm_display_mode *mode, struct intel_crtc_state *pipe_config); -void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file); int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); @@ -1323,13 +1328,16 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev) #endif /* intel_fbc.c */ +void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, + struct drm_atomic_state *state); bool intel_fbc_is_active(struct drm_i915_private *dev_priv); -void intel_fbc_deactivate(struct intel_crtc *crtc); -void intel_fbc_update(struct intel_crtc *crtc); +void intel_fbc_pre_update(struct intel_crtc *crtc); +void intel_fbc_post_update(struct intel_crtc *crtc); void intel_fbc_init(struct drm_i915_private *dev_priv); +void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv); void intel_fbc_enable(struct intel_crtc *crtc); -void intel_fbc_disable(struct drm_i915_private *dev_priv); -void intel_fbc_disable_crtc(struct intel_crtc *crtc); +void intel_fbc_disable(struct intel_crtc *crtc); +void intel_fbc_global_disable(struct drm_i915_private *dev_priv); void intel_fbc_invalidate(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits, enum fb_op_origin origin); @@ -1555,6 +1563,7 @@ void skl_wm_get_hw_state(struct drm_device *dev); void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb /* out */); uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state 
*pipe_config); +int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6); /* intel_sdvo.c */ bool intel_sdvo_init(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 44742fa2f616..378f879f4015 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -478,8 +478,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) DRM_DEBUG_KMS("\n"); - intel_dsi_prepare(encoder); intel_enable_dsi_pll(encoder); + intel_dsi_prepare(encoder); /* Panel Enable over CRC PMIC */ if (intel_dsi->gpio_panel) @@ -702,7 +702,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, static void intel_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - u32 pclk = 0; + u32 pclk; DRM_DEBUG_KMS("\n"); pipe_config->has_dsi_encoder = true; @@ -713,12 +713,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, */ pipe_config->dpll_hw_state.dpll_md = 0; - if (IS_BROXTON(encoder->base.dev)) - pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp); - else if (IS_VALLEYVIEW(encoder->base.dev) || - IS_CHERRYVIEW(encoder->base.dev)) - pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp); - + pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp); if (!pclk) return; diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index 02551ff228c2..de7be7f3fb42 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -126,8 +126,7 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) extern void intel_enable_dsi_pll(struct intel_encoder *encoder); extern void intel_disable_dsi_pll(struct intel_encoder *encoder); -extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp); -extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp); +extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp); extern void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index a5e99ac305da..787f01c63984 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -204,10 +204,28 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) struct drm_device *dev = intel_dsi->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + if (dev_priv->vbt.dsi.seq_version >= 3) + data++; + gpio = *data++; /* pull up/down */ - action = *data++; + action = *data++ & 1; + + if (gpio >= ARRAY_SIZE(gtable)) { + DRM_DEBUG_KMS("unknown gpio %u\n", gpio); + goto out; + } + + if (!IS_VALLEYVIEW(dev_priv)) { + DRM_DEBUG_KMS("GPIO element not supported on this platform\n"); + goto out; + } + + if (dev_priv->vbt.dsi.seq_version >= 3) { + DRM_DEBUG_KMS("GPIO element v3 not supported\n"); + goto out; + } function = gtable[gpio].function_reg; pad = gtable[gpio].pad_reg; @@ -216,27 +234,33 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) if (!gtable[gpio].init) { /* program the function */ /* FIXME: remove constant below */ - vlv_gpio_nc_write(dev_priv, function, 0x2000CC00); + vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function, + 0x2000CC00); gtable[gpio].init = 1; } val = 0x4 | action; /* pull up/down */ - vlv_gpio_nc_write(dev_priv, pad, val); + vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val); mutex_unlock(&dev_priv->sb_lock); 
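
As an aside, the reworked GPIO element parsing above can be modeled with a short standalone sketch: sequence-block version 3 carries one extra byte ahead of the GPIO index, and only bit 0 of the following byte encodes the pull up/down action. The table size and the printf reporting are stand-ins for the driver's gtable bounds check and debug output.

#include <stdint.h>
#include <stdio.h>

#define PLACEHOLDER_GTABLE_SIZE 8	/* stand-in for ARRAY_SIZE(gtable) */

static const uint8_t *decode_gpio_element(const uint8_t *data,
					  unsigned int seq_version)
{
	unsigned int gpio, action;

	if (seq_version >= 3)
		data++;			/* v3 blocks carry one extra leading byte */

	gpio = *data++;			/* index into the platform GPIO table */
	action = *data++ & 1;		/* only bit 0: pull up (1) or pull down (0) */

	if (gpio >= PLACEHOLDER_GTABLE_SIZE)
		printf("unknown gpio %u\n", gpio);
	else
		printf("gpio %u -> %s\n", gpio, action ? "pull up" : "pull down");

	return data;			/* first byte after the element */
}

int main(void)
{
	const uint8_t v2_elem[] = { 2, 1 };	/* gpio 2, pull up */
	const uint8_t v3_elem[] = { 0, 2, 0 };	/* extra byte, gpio 2, pull down */

	decode_gpio_element(v2_elem, 2);
	decode_gpio_element(v3_elem, 3);
	return 0;
}
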
+out: return data; } +static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data) +{ + return data + *(data + 6) + 7; +} + typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi, const u8 *data); static const fn_mipi_elem_exec exec_elem[] = { - NULL, /* reserved */ - mipi_exec_send_packet, - mipi_exec_delay, - mipi_exec_gpio, - NULL, /* status read; later */ + [MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet, + [MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay, + [MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio, + [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip, }; /* @@ -246,107 +270,114 @@ static const fn_mipi_elem_exec exec_elem[] = { */ static const char * const seq_name[] = { - "UNDEFINED", - "MIPI_SEQ_ASSERT_RESET", - "MIPI_SEQ_INIT_OTP", - "MIPI_SEQ_DISPLAY_ON", - "MIPI_SEQ_DISPLAY_OFF", - "MIPI_SEQ_DEASSERT_RESET" + [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET", + [MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP", + [MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON", + [MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF", + [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET", + [MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON", + [MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF", + [MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON", + [MIPI_SEQ_TEAR_OFF] = "MIPI_SEQ_TEAR_OFF", + [MIPI_SEQ_POWER_ON] = "MIPI_SEQ_POWER_ON", + [MIPI_SEQ_POWER_OFF] = "MIPI_SEQ_POWER_OFF", }; -static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data) +static const char *sequence_name(enum mipi_seq seq_id) { + if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id]) + return seq_name[seq_id]; + else + return "(unknown)"; +} + +static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id) +{ + struct vbt_panel *vbt_panel = to_vbt_panel(panel); + struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; + struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + const u8 *data; fn_mipi_elem_exec mipi_elem_exec; - int index; - if (!data) + if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence))) return; - DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]); + data = dev_priv->vbt.dsi.sequence[seq_id]; + if (!data) { + DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n", + seq_id, sequence_name(seq_id)); + return; + } - /* go to the first element of the sequence */ - data++; + WARN_ON(*data != seq_id); - /* parse each byte till we reach end of sequence byte - 0x00 */ - while (1) { - index = *data; - mipi_elem_exec = exec_elem[index]; - if (!mipi_elem_exec) { - DRM_ERROR("Unsupported MIPI element, skipping sequence execution\n"); - return; - } + DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n", + seq_id, sequence_name(seq_id)); - /* goto element payload */ - data++; + /* Skip Sequence Byte. */ + data++; - /* execute the element specific rotines */ - data = mipi_elem_exec(intel_dsi, data); + /* Skip Size of Sequence. */ + if (dev_priv->vbt.dsi.seq_version >= 3) + data += 4; - /* - * After processing the element, data should point to - * next element or end of sequence - * check if have we reached end of sequence - */ - if (*data == 0x00) + while (1) { + u8 operation_byte = *data++; + u8 operation_size = 0; + + if (operation_byte == MIPI_SEQ_ELEM_END) break; + + if (operation_byte < ARRAY_SIZE(exec_elem)) + mipi_elem_exec = exec_elem[operation_byte]; + else + mipi_elem_exec = NULL; + + /* Size of Operation. 
*/ + if (dev_priv->vbt.dsi.seq_version >= 3) + operation_size = *data++; + + if (mipi_elem_exec) { + data = mipi_elem_exec(intel_dsi, data); + } else if (operation_size) { + /* We have size, skip. */ + DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n", + operation_byte); + data += operation_size; + } else { + /* No size, can't skip without parsing. */ + DRM_ERROR("Unsupported MIPI operation byte %u\n", + operation_byte); + return; + } } } static int vbt_panel_prepare(struct drm_panel *panel) { - struct vbt_panel *vbt_panel = to_vbt_panel(panel); - struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const u8 *sequence; - - sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET]; - generic_exec_sequence(intel_dsi, sequence); - - sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; - generic_exec_sequence(intel_dsi, sequence); + generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET); + generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP); return 0; } static int vbt_panel_unprepare(struct drm_panel *panel) { - struct vbt_panel *vbt_panel = to_vbt_panel(panel); - struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const u8 *sequence; - - sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]; - generic_exec_sequence(intel_dsi, sequence); + generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET); return 0; } static int vbt_panel_enable(struct drm_panel *panel) { - struct vbt_panel *vbt_panel = to_vbt_panel(panel); - struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const u8 *sequence; - - sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]; - generic_exec_sequence(intel_dsi, sequence); + generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON); return 0; } static int vbt_panel_disable(struct drm_panel *panel) { - struct vbt_panel *vbt_panel = to_vbt_panel(panel); - struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const u8 *sequence; - - sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF]; - generic_exec_sequence(intel_dsi, sequence); + generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF); return 0; } @@ -666,6 +697,8 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) /* This is cheating a bit with the cleanup. 
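
The rewritten generic_exec_sequence() boils down to a fixed walk over the sequence block; a minimal standalone model of the v3 layout it handles is sketched below. The element ID, size value and delay payload are illustrative, and real elements are dispatched through exec_elem[] rather than skipped.

#include <stdint.h>
#include <stdio.h>

#define ELEM_END	0	/* stand-in for MIPI_SEQ_ELEM_END */

static void walk_sequence_v3(const uint8_t *data)
{
	printf("sequence %u\n", (unsigned int)*data);
	data++;				/* Sequence Byte */
	data += 4;			/* Size of Sequence (v3 only), not checked here */

	for (;;) {
		unsigned int op = *data++;	/* operation byte */
		unsigned int size;

		if (op == ELEM_END)
			break;

		size = *data++;		/* Size of Operation (v3 only) */
		printf("  element %u, %u payload byte(s)\n", op, size);
		data += size;		/* unknown elements can be skipped by size */
	}
}

int main(void)
{
	/* sequence id 3, 4-byte size field, one element with a 4-byte payload */
	const uint8_t seq[] = { 3, 7, 0, 0, 0,
				2, 4, 0x10, 0x27, 0x00, 0x00,
				ELEM_END };
	walk_sequence_v3(seq);
	return 0;
}

Pre-v3 blocks lack both size fields, which is why an unsupported element there cannot be skipped and aborts the sequence instead.
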
*/ vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL); + if (!vbt_panel) + return NULL; vbt_panel->intel_dsi = intel_dsi; drm_panel_init(&vbt_panel->panel); diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index fbd2b51810ca..bb5e95a1a453 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c @@ -30,14 +30,6 @@ #include "i915_drv.h" #include "intel_dsi.h" -#define DSI_HSS_PACKET_SIZE 4 -#define DSI_HSE_PACKET_SIZE 4 -#define DSI_HSA_PACKET_EXTRA_SIZE 6 -#define DSI_HBP_PACKET_EXTRA_SIZE 6 -#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6 -#define DSI_HFP_PACKET_EXTRA_SIZE 6 -#define DSI_EOTP_PACKET_SIZE 4 - static int dsi_pixel_format_bpp(int pixel_format) { int bpp; @@ -71,77 +63,6 @@ static const u32 lfsr_converts[] = { 71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */ }; -#ifdef DSI_CLK_FROM_RR - -static u32 dsi_rr_formula(const struct drm_display_mode *mode, - int pixel_format, int video_mode_format, - int lane_count, bool eotp) -{ - u32 bpp; - u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp; - u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes; - u32 bytes_per_line, bytes_per_frame; - u32 num_frames; - u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes; - u32 dsi_bit_clock_hz; - u32 dsi_clk; - - bpp = dsi_pixel_format_bpp(pixel_format); - - hactive = mode->hdisplay; - vactive = mode->vdisplay; - hfp = mode->hsync_start - mode->hdisplay; - hsync = mode->hsync_end - mode->hsync_start; - hbp = mode->htotal - mode->hsync_end; - - vfp = mode->vsync_start - mode->vdisplay; - vsync = mode->vsync_end - mode->vsync_start; - vbp = mode->vtotal - mode->vsync_end; - - hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8); - hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8); - hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8); - hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8); - - bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes + - DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE + - hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE + - hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE + - hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE; - - /* - * XXX: Need to accurately calculate LP to HS transition timeout and add - * it to bytes_per_line/bytes_per_frame. 
- */ - - if (eotp && video_mode_format == VIDEO_MODE_BURST) - bytes_per_line += DSI_EOTP_PACKET_SIZE; - - bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line + - vactive * bytes_per_line + vfp * bytes_per_line; - - if (eotp && - (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE || - video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS)) - bytes_per_frame += DSI_EOTP_PACKET_SIZE; - - num_frames = drm_mode_vrefresh(mode); - bytes_per_x_frames = num_frames * bytes_per_frame; - - bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count; - - /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */ - dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8; - dsi_clk = dsi_bit_clock_hz / 1000; - - if (eotp && video_mode_format == VIDEO_MODE_BURST) - dsi_clk *= 2; - - return dsi_clk; -} - -#else - /* Get DSI clock from pixel clock */ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count) { @@ -155,8 +76,6 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count) return dsi_clk_khz; } -#endif - static int dsi_calc_mnp(struct drm_i915_private *dev_priv, struct dsi_mnp *dsi_mnp, int target_dsi_clk) { @@ -322,7 +241,7 @@ static void assert_bpp_mismatch(int pixel_format, int pipe_bpp) bpp, pipe_bpp); } -u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) +static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); @@ -384,7 +303,7 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) return pclk; } -u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) +static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp) { u32 pclk; u32 dsi_clk; @@ -419,6 +338,14 @@ u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) return pclk; } +u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp) +{ + if (IS_BROXTON(encoder->base.dev)) + return bxt_dsi_get_pclk(encoder, pipe_bpp); + else + return vlv_dsi_get_pclk(encoder, pipe_bpp); +} + static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { u32 temp; diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index a1988a486b92..3614a951736b 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -43,7 +43,7 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv) { - return dev_priv->fbc.activate != NULL; + return HAS_FBC(dev_priv); } static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv) @@ -56,6 +56,11 @@ static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv) return INTEL_INFO(dev_priv)->gen < 4; } +static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv) +{ + return INTEL_INFO(dev_priv)->gen <= 3; +} + /* * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's @@ -74,19 +79,17 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc) * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value * we wrote to PIPESRC. 
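
Back on the DSI PLL cleanup above: with the #ifdef'ed dsi_rr_formula() removed, the only remaining path derives the per-lane link clock from the pixel clock. A hedged standalone sketch of that relation follows; the rounding and the sample numbers are illustrative and are not taken from dsi_clk_from_pclk() itself.

#include <stdint.h>
#include <stdio.h>

/* DSI link bandwidth must carry pclk * bits-per-pixel, split across lanes. */
static uint32_t dsi_clk_khz_from_pclk(uint32_t pclk_khz, uint32_t bpp,
				      uint32_t lane_count)
{
	return (pclk_khz * bpp + lane_count / 2) / lane_count; /* round to nearest */
}

int main(void)
{
	/* e.g. ~154 MHz pixel clock, RGB888 (24 bpp), 4 lanes */
	printf("%u kHz per lane\n", dsi_clk_khz_from_pclk(154000, 24, 4));
	return 0;
}
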
*/ -static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc, +static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache, int *width, int *height) { - struct intel_plane_state *plane_state = - to_intel_plane_state(crtc->base.primary->state); int w, h; - if (intel_rotation_90_or_270(plane_state->base.rotation)) { - w = drm_rect_height(&plane_state->src) >> 16; - h = drm_rect_width(&plane_state->src) >> 16; + if (intel_rotation_90_or_270(cache->plane.rotation)) { + w = cache->plane.src_h; + h = cache->plane.src_w; } else { - w = drm_rect_width(&plane_state->src) >> 16; - h = drm_rect_height(&plane_state->src) >> 16; + w = cache->plane.src_w; + h = cache->plane.src_h; } if (width) @@ -95,26 +98,23 @@ static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc, *height = h; } -static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc, - struct drm_framebuffer *fb) +static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, + struct intel_fbc_state_cache *cache) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; int lines; - intel_fbc_get_plane_source_size(crtc, NULL, &lines); + intel_fbc_get_plane_source_size(cache, NULL, &lines); if (INTEL_INFO(dev_priv)->gen >= 7) lines = min(lines, 2048); /* Hardware needs the full buffer stride, not just the active area. */ - return lines * fb->pitches[0]; + return lines * cache->fb.stride; } static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) { u32 fbc_ctl; - dev_priv->fbc.active = false; - /* Disable compression */ fbc_ctl = I915_READ(FBC_CONTROL); if ((fbc_ctl & FBC_CTL_EN) == 0) @@ -130,21 +130,17 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) } } -static void i8xx_fbc_activate(struct intel_crtc *crtc) +static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb = crtc->base.primary->fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; int cfb_pitch; int i; u32 fbc_ctl; - dev_priv->fbc.active = true; - /* Note: fbc.threshold == 1 for i8xx */ - cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE; - if (fb->pitches[0] < cfb_pitch) - cfb_pitch = fb->pitches[0]; + cfb_pitch = params->cfb_size / FBC_LL_SIZE; + if (params->fb.stride < cfb_pitch) + cfb_pitch = params->fb.stride; /* FBC_CTL wants 32B or 64B units */ if (IS_GEN2(dev_priv)) @@ -161,9 +157,9 @@ static void i8xx_fbc_activate(struct intel_crtc *crtc) /* Set it up... */ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; - fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane); + fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane); I915_WRITE(FBC_CONTROL2, fbc_ctl2); - I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc)); + I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset); } /* enable it... 
*/ @@ -173,7 +169,7 @@ static void i8xx_fbc_activate(struct intel_crtc *crtc) if (IS_I945GM(dev_priv)) fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; - fbc_ctl |= obj->fence_reg; + fbc_ctl |= params->fb.fence_reg; I915_WRITE(FBC_CONTROL, fbc_ctl); } @@ -182,23 +178,19 @@ static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) return I915_READ(FBC_CONTROL) & FBC_CTL_EN; } -static void g4x_fbc_activate(struct intel_crtc *crtc) +static void g4x_fbc_activate(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb = crtc->base.primary->fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; u32 dpfc_ctl; - dev_priv->fbc.active = true; - - dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN; - if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN; + if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2) dpfc_ctl |= DPFC_CTL_LIMIT_2X; else dpfc_ctl |= DPFC_CTL_LIMIT_1X; - dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; + dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg; - I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc)); + I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); /* enable it... */ I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -208,8 +200,6 @@ static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) { u32 dpfc_ctl; - dev_priv->fbc.active = false; - /* Disable compression */ dpfc_ctl = I915_READ(DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { @@ -230,19 +220,14 @@ static void intel_fbc_recompress(struct drm_i915_private *dev_priv) POSTING_READ(MSG_FBC_REND_STATE); } -static void ilk_fbc_activate(struct intel_crtc *crtc) +static void ilk_fbc_activate(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb = crtc->base.primary->fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; u32 dpfc_ctl; int threshold = dev_priv->fbc.threshold; - unsigned int y_offset; - dev_priv->fbc.active = true; - - dpfc_ctl = DPFC_CTL_PLANE(crtc->plane); - if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane); + if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2) threshold++; switch (threshold) { @@ -259,18 +244,17 @@ static void ilk_fbc_activate(struct intel_crtc *crtc) } dpfc_ctl |= DPFC_CTL_FENCE_EN; if (IS_GEN5(dev_priv)) - dpfc_ctl |= obj->fence_reg; + dpfc_ctl |= params->fb.fence_reg; - y_offset = get_crtc_fence_y_offset(crtc); - I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset); - I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); + I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); + I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID); /* enable it... 
*/ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); if (IS_GEN6(dev_priv)) { I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | obj->fence_reg); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset); + SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); } intel_fbc_recompress(dev_priv); @@ -280,8 +264,6 @@ static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) { u32 dpfc_ctl; - dev_priv->fbc.active = false; - /* Disable compression */ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { @@ -295,21 +277,17 @@ static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; } -static void gen7_fbc_activate(struct intel_crtc *crtc) +static void gen7_fbc_activate(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb = crtc->base.primary->fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; u32 dpfc_ctl; int threshold = dev_priv->fbc.threshold; - dev_priv->fbc.active = true; - dpfc_ctl = 0; if (IS_IVYBRIDGE(dev_priv)) - dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane); + dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane); - if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) + if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2) threshold++; switch (threshold) { @@ -337,20 +315,60 @@ static void gen7_fbc_activate(struct intel_crtc *crtc) ILK_FBCQ_DIS); } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ - I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe), - I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) | + I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe), + I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS); } I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | obj->fence_reg); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc)); + SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); intel_fbc_recompress(dev_priv); } +static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) +{ + if (INTEL_INFO(dev_priv)->gen >= 5) + return ilk_fbc_is_active(dev_priv); + else if (IS_GM45(dev_priv)) + return g4x_fbc_is_active(dev_priv); + else + return i8xx_fbc_is_active(dev_priv); +} + +static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + fbc->active = true; + + if (INTEL_INFO(dev_priv)->gen >= 7) + gen7_fbc_activate(dev_priv); + else if (INTEL_INFO(dev_priv)->gen >= 5) + ilk_fbc_activate(dev_priv); + else if (IS_GM45(dev_priv)) + g4x_fbc_activate(dev_priv); + else + i8xx_fbc_activate(dev_priv); +} + +static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + fbc->active = false; + + if (INTEL_INFO(dev_priv)->gen >= 5) + ilk_fbc_deactivate(dev_priv); + else if (IS_GM45(dev_priv)) + g4x_fbc_deactivate(dev_priv); + else + i8xx_fbc_deactivate(dev_priv); +} + /** * intel_fbc_is_active - Is FBC active? 
* @dev_priv: i915 device instance @@ -364,24 +382,24 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv) return dev_priv->fbc.active; } -static void intel_fbc_activate(const struct drm_framebuffer *fb) -{ - struct drm_i915_private *dev_priv = fb->dev->dev_private; - struct intel_crtc *crtc = dev_priv->fbc.crtc; - - dev_priv->fbc.activate(crtc); - - dev_priv->fbc.fb_id = fb->base.id; - dev_priv->fbc.y = crtc->base.y; -} - static void intel_fbc_work_fn(struct work_struct *__work) { struct drm_i915_private *dev_priv = container_of(__work, struct drm_i915_private, fbc.work.work); - struct intel_fbc_work *work = &dev_priv->fbc.work; - struct intel_crtc *crtc = dev_priv->fbc.crtc; - int delay_ms = 50; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_work *work = &fbc->work; + struct intel_crtc *crtc = fbc->crtc; + struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe]; + + if (drm_crtc_vblank_get(&crtc->base)) { + DRM_ERROR("vblank not available for FBC on pipe %c\n", + pipe_name(crtc->pipe)); + + mutex_lock(&fbc->lock); + work->scheduled = false; + mutex_unlock(&fbc->lock); + return; + } retry: /* Delay the actual enabling to let pageflipping cease and the @@ -390,142 +408,97 @@ retry: * vblank to pass after disabling the FBC before we attempt * to modify the control registers. * - * A more complicated solution would involve tracking vblanks - * following the termination of the page-flipping sequence - * and indeed performing the enable as a co-routine and not - * waiting synchronously upon the vblank. - * * WaFbcWaitForVBlankBeforeEnable:ilk,snb + * + * It is also worth mentioning that since work->scheduled_vblank can be + * updated multiple times by the other threads, hitting the timeout is + * not an error condition. We'll just end up hitting the "goto retry" + * case below. */ - wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms); + wait_event_timeout(vblank->queue, + drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank, + msecs_to_jiffies(50)); - mutex_lock(&dev_priv->fbc.lock); + mutex_lock(&fbc->lock); /* Were we cancelled? */ if (!work->scheduled) goto out; /* Were we delayed again while this function was sleeping? */ - if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms), - jiffies)) { - mutex_unlock(&dev_priv->fbc.lock); + if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) { + mutex_unlock(&fbc->lock); goto retry; } - if (crtc->base.primary->fb == work->fb) - intel_fbc_activate(work->fb); + intel_fbc_hw_activate(dev_priv); work->scheduled = false; out: - mutex_unlock(&dev_priv->fbc.lock); -} - -static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv) -{ - WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); - dev_priv->fbc.work.scheduled = false; + mutex_unlock(&fbc->lock); + drm_crtc_vblank_put(&crtc->base); } static void intel_fbc_schedule_activation(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct intel_fbc_work *work = &dev_priv->fbc.work; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_work *work = &fbc->work; - WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); + WARN_ON(!mutex_is_locked(&fbc->lock)); - /* It is useless to call intel_fbc_cancel_work() in this function since - * we're not releasing fbc.lock, so it won't have an opportunity to grab - * it to discover that it was cancelled. So we just update the expected - * jiffy count. 
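
The control flow of the new work function can be captured in a small self-contained model: activation waits for the vblank counter to move past the value recorded when the work was scheduled, and simply goes back to sleep if it was re-armed in the meantime. The helpers below are fakes standing in for wait_event_timeout(), drm_crtc_vblank_count() and the hardware activate call; no locking or cancellation race is modeled.

#include <stdbool.h>
#include <stdio.h>

static unsigned long vblank;				/* fake vblank counter */

static unsigned long current_vblank(void) { return vblank; }

/* stand-in for wait_event_timeout(): pretend one vblank fires while waiting */
static void wait_for_vblank_past(unsigned long scheduled)
{
	if (vblank == scheduled)
		vblank++;
}

static void hw_activate(void) { printf("FBC activated at vblank %lu\n", vblank); }

struct fake_fbc_work {
	bool scheduled;
	unsigned long scheduled_vblank;
};

static void fbc_work_fn(struct fake_fbc_work *work)
{
retry:
	wait_for_vblank_past(work->scheduled_vblank);

	if (!work->scheduled)
		return;				/* cancelled while sleeping */

	/* re-armed against a newer vblank (or the wait timed out): sleep again */
	if (current_vblank() == work->scheduled_vblank)
		goto retry;

	hw_activate();
	work->scheduled = false;
}

int main(void)
{
	struct fake_fbc_work work = {
		.scheduled = true,
		.scheduled_vblank = current_vblank(),
	};

	fbc_work_fn(&work);
	return 0;
}
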
*/ - work->fb = crtc->base.primary->fb; + if (drm_crtc_vblank_get(&crtc->base)) { + DRM_ERROR("vblank not available for FBC on pipe %c\n", + pipe_name(crtc->pipe)); + return; + } + + /* It is useless to call intel_fbc_cancel_work() or cancel_work() in + * this function since we're not releasing fbc.lock, so it won't have an + * opportunity to grab it to discover that it was cancelled. So we just + * update the expected jiffy count. */ work->scheduled = true; - work->enable_jiffies = jiffies; + work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base); + drm_crtc_vblank_put(&crtc->base); schedule_work(&work->work); } -static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv) +static void intel_fbc_deactivate(struct drm_i915_private *dev_priv) { - WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); + struct intel_fbc *fbc = &dev_priv->fbc; - intel_fbc_cancel_work(dev_priv); + WARN_ON(!mutex_is_locked(&fbc->lock)); - if (dev_priv->fbc.active) - dev_priv->fbc.deactivate(dev_priv); -} + /* Calling cancel_work() here won't help due to the fact that the work + * function grabs fbc->lock. Just set scheduled to false so the work + * function can know it was cancelled. */ + fbc->work.scheduled = false; -/* - * intel_fbc_deactivate - deactivate FBC if it's associated with crtc - * @crtc: the CRTC - * - * This function deactivates FBC if it's associated with the provided CRTC. - */ -void intel_fbc_deactivate(struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - - if (!fbc_supported(dev_priv)) - return; - - mutex_lock(&dev_priv->fbc.lock); - if (dev_priv->fbc.crtc == crtc) - __intel_fbc_deactivate(dev_priv); - mutex_unlock(&dev_priv->fbc.lock); + if (fbc->active) + intel_fbc_hw_deactivate(dev_priv); } -static void set_no_fbc_reason(struct drm_i915_private *dev_priv, - const char *reason) -{ - if (dev_priv->fbc.no_fbc_reason == reason) - return; - - dev_priv->fbc.no_fbc_reason = reason; - DRM_DEBUG_KMS("Disabling FBC: %s\n", reason); -} - -static bool crtc_can_fbc(struct intel_crtc *crtc) +static bool multiple_pipes_ok(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_plane *primary = crtc->base.primary; + struct intel_fbc *fbc = &dev_priv->fbc; + enum pipe pipe = crtc->pipe; - if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) - return false; - - if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) - return false; - - return true; -} - -static bool crtc_is_valid(struct intel_crtc *crtc) -{ - if (!intel_crtc_active(&crtc->base)) - return false; - - if (!to_intel_plane_state(crtc->base.primary->state)->visible) - return false; - - return true; -} - -static bool multiple_pipes_ok(struct drm_i915_private *dev_priv) -{ - enum pipe pipe; - int n_pipes = 0; - struct drm_crtc *crtc; - - if (INTEL_INFO(dev_priv)->gen > 4) + /* Don't even bother tracking anything we don't need. 
*/ + if (!no_fbc_on_multiple_pipes(dev_priv)) return true; - for_each_pipe(dev_priv, pipe) { - crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + WARN_ON(!drm_modeset_is_locked(&primary->mutex)); - if (intel_crtc_active(crtc) && - to_intel_plane_state(crtc->primary->state)->visible) - n_pipes++; - } + if (to_intel_plane_state(primary->state)->visible) + fbc->visible_pipes_mask |= (1 << pipe); + else + fbc->visible_pipes_mask &= ~(1 << pipe); - return (n_pipes < 2); + return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0; } static int find_compression_threshold(struct drm_i915_private *dev_priv, @@ -581,16 +554,16 @@ again: static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb = crtc->base.primary->state->fb; + struct intel_fbc *fbc = &dev_priv->fbc; struct drm_mm_node *uninitialized_var(compressed_llb); int size, fb_cpp, ret; - WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb)); + WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb)); - size = intel_fbc_calculate_cfb_size(crtc, fb); - fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0); + size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache); + fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0); - ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb, + ret = find_compression_threshold(dev_priv, &fbc->compressed_fb, size, fb_cpp); if (!ret) goto err_llb; @@ -599,12 +572,12 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) } - dev_priv->fbc.threshold = ret; + fbc->threshold = ret; if (INTEL_INFO(dev_priv)->gen >= 5) - I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); + I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start); else if (IS_GM45(dev_priv)) { - I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); + I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start); } else { compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); if (!compressed_llb) @@ -615,23 +588,22 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) if (ret) goto err_fb; - dev_priv->fbc.compressed_llb = compressed_llb; + fbc->compressed_llb = compressed_llb; I915_WRITE(FBC_CFB_BASE, - dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start); + dev_priv->mm.stolen_base + fbc->compressed_fb.start); I915_WRITE(FBC_LL_BASE, dev_priv->mm.stolen_base + compressed_llb->start); } DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n", - dev_priv->fbc.compressed_fb.size, - dev_priv->fbc.threshold); + fbc->compressed_fb.size, fbc->threshold); return 0; err_fb: kfree(compressed_llb); - i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb); + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); err_llb: pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. 
Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; @@ -639,25 +611,27 @@ err_llb: static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) { - if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb)) - i915_gem_stolen_remove_node(dev_priv, - &dev_priv->fbc.compressed_fb); - - if (dev_priv->fbc.compressed_llb) { - i915_gem_stolen_remove_node(dev_priv, - dev_priv->fbc.compressed_llb); - kfree(dev_priv->fbc.compressed_llb); + struct intel_fbc *fbc = &dev_priv->fbc; + + if (drm_mm_node_allocated(&fbc->compressed_fb)) + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); + + if (fbc->compressed_llb) { + i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb); + kfree(fbc->compressed_llb); } } void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) { + struct intel_fbc *fbc = &dev_priv->fbc; + if (!fbc_supported(dev_priv)) return; - mutex_lock(&dev_priv->fbc.lock); + mutex_lock(&fbc->lock); __intel_fbc_cleanup_cfb(dev_priv); - mutex_unlock(&dev_priv->fbc.lock); + mutex_unlock(&fbc->lock); } static bool stride_is_valid(struct drm_i915_private *dev_priv, @@ -681,19 +655,17 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv, return true; } -static bool pixel_format_is_valid(struct drm_framebuffer *fb) +static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, + uint32_t pixel_format) { - struct drm_device *dev = fb->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - switch (fb->pixel_format) { + switch (pixel_format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: return true; case DRM_FORMAT_XRGB1555: case DRM_FORMAT_RGB565: /* 16bpp not supported on gen2 */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return false; /* WaFbcOnly1to1Ratio:ctg */ if (IS_G4X(dev_priv)) @@ -713,6 +685,7 @@ static bool pixel_format_is_valid(struct drm_framebuffer *fb) static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; unsigned int effective_w, effective_h, max_w, max_h; if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) { @@ -726,87 +699,105 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) max_h = 1536; } - intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h); + intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w, + &effective_h); effective_w += crtc->adjusted_x; effective_h += crtc->adjusted_y; return effective_w <= max_w && effective_h <= max_h; } -/** - * __intel_fbc_update - activate/deactivate FBC as needed, unlocked - * @crtc: the CRTC that triggered the update - * - * This function completely reevaluates the status of FBC, then activates, - * deactivates or maintains it on the same state. 
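
For orientation, the compressed framebuffer sizing behind intel_fbc_alloc_cfb() and intel_fbc_calculate_cfb_size() reduces to two rules, sketched standalone below: the CFB must cover every plane line at the full framebuffer stride (capped at 2048 lines on gen7+), and a compression threshold of N lets the allocation be as small as 1/N of that, which is the check intel_fbc_can_activate() repeats further down. The figures in main() are arbitrary examples.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t cfb_size(uint32_t plane_lines, uint32_t fb_stride, bool gen7_plus)
{
	if (gen7_plus && plane_lines > 2048)
		plane_lines = 2048;
	/* hardware needs the full stride, not just the visible width */
	return plane_lines * fb_stride;
}

static bool cfb_allocation_ok(uint32_t allocated, uint32_t threshold,
			      uint32_t needed)
{
	/* mirrors: calculate_cfb_size() > compressed_fb.size * threshold */
	return needed <= allocated * threshold;
}

int main(void)
{
	uint32_t needed = cfb_size(1080, 1920 * 4, true);	/* ~7.9 MiB */

	printf("CFB needs %u bytes\n", needed);
	printf("4 MiB of stolen at 2:1 compression: %s\n",
	       cfb_allocation_ok(4u << 20, 2, needed) ? "enough" : "too small");
	return 0;
}
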
- */ -static void __intel_fbc_update(struct intel_crtc *crtc) +static void intel_fbc_update_state_cache(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane_state *plane_state = + to_intel_plane_state(crtc->base.primary->state); + struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj; - const struct drm_display_mode *adjusted_mode; - WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); + WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex)); + WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex)); - if (!multiple_pipes_ok(dev_priv)) { - set_no_fbc_reason(dev_priv, "more than one pipe active"); - goto out_disable; - } + cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + cache->crtc.hsw_bdw_pixel_rate = + ilk_pipe_pixel_rate(crtc_state); - if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc) - return; + cache->plane.rotation = plane_state->base.rotation; + cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16; + cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16; + cache->plane.visible = plane_state->visible; - if (!crtc_is_valid(crtc)) { - set_no_fbc_reason(dev_priv, "no output"); - goto out_disable; - } + if (!cache->plane.visible) + return; - fb = crtc->base.primary->fb; obj = intel_fb_obj(fb); - adjusted_mode = &crtc->config->base.adjusted_mode; - if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) || - (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) { - set_no_fbc_reason(dev_priv, "incompatible mode"); - goto out_disable; + /* FIXME: We lack the proper locking here, so only run this on the + * platforms that need. */ + if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7) + cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj); + cache->fb.pixel_format = fb->pixel_format; + cache->fb.stride = fb->pitches[0]; + cache->fb.fence_reg = obj->fence_reg; + cache->fb.tiling_mode = obj->tiling_mode; +} + +static bool intel_fbc_can_activate(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + if (!cache->plane.visible) { + fbc->no_fbc_reason = "primary plane not visible"; + return false; + } + + if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) || + (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) { + fbc->no_fbc_reason = "incompatible mode"; + return false; } if (!intel_fbc_hw_tracking_covers_screen(crtc)) { - set_no_fbc_reason(dev_priv, "mode too large for compression"); - goto out_disable; + fbc->no_fbc_reason = "mode too large for compression"; + return false; } /* The use of a CPU fence is mandatory in order to detect writes * by the CPU to the scanout and trigger updates to the FBC. 
*/ - if (obj->tiling_mode != I915_TILING_X || - obj->fence_reg == I915_FENCE_REG_NONE) { - set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced"); - goto out_disable; + if (cache->fb.tiling_mode != I915_TILING_X || + cache->fb.fence_reg == I915_FENCE_REG_NONE) { + fbc->no_fbc_reason = "framebuffer not tiled or fenced"; + return false; } if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) && - crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) { - set_no_fbc_reason(dev_priv, "rotation unsupported"); - goto out_disable; + cache->plane.rotation != BIT(DRM_ROTATE_0)) { + fbc->no_fbc_reason = "rotation unsupported"; + return false; } - if (!stride_is_valid(dev_priv, fb->pitches[0])) { - set_no_fbc_reason(dev_priv, "framebuffer stride not supported"); - goto out_disable; + if (!stride_is_valid(dev_priv, cache->fb.stride)) { + fbc->no_fbc_reason = "framebuffer stride not supported"; + return false; } - if (!pixel_format_is_valid(fb)) { - set_no_fbc_reason(dev_priv, "pixel format is invalid"); - goto out_disable; + if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) { + fbc->no_fbc_reason = "pixel format is invalid"; + return false; } /* WaFbcExceedCdClockThreshold:hsw,bdw */ if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && - ilk_pipe_pixel_rate(crtc->config) >= - dev_priv->cdclk_freq * 95 / 100) { - set_no_fbc_reason(dev_priv, "pixel rate is too big"); - goto out_disable; + cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) { + fbc->no_fbc_reason = "pixel rate is too big"; + return false; } /* It is possible for the required CFB size change without a @@ -819,189 +810,320 @@ static void __intel_fbc_update(struct intel_crtc *crtc) * we didn't get any invalidate/deactivate calls, but this would require * a lot of tracking just for a specific case. If we conclude it's an * important case, we can implement it later. */ - if (intel_fbc_calculate_cfb_size(crtc, fb) > - dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) { - set_no_fbc_reason(dev_priv, "CFB requirements changed"); - goto out_disable; + if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) > + fbc->compressed_fb.size * fbc->threshold) { + fbc->no_fbc_reason = "CFB requirements changed"; + return false; } + return true; +} + +static bool intel_fbc_can_choose(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; + + if (intel_vgpu_active(dev_priv->dev)) { + fbc->no_fbc_reason = "VGPU is active"; + return false; + } + + if (i915.enable_fbc < 0) { + fbc->no_fbc_reason = "disabled per chip default"; + return false; + } + + if (!i915.enable_fbc) { + fbc->no_fbc_reason = "disabled per module param"; + return false; + } + + if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) { + fbc->no_fbc_reason = "no enabled pipes can have FBC"; + return false; + } + + if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) { + fbc->no_fbc_reason = "no enabled planes can have FBC"; + return false; + } + + return true; +} + +static void intel_fbc_get_reg_params(struct intel_crtc *crtc, + struct intel_fbc_reg_params *params) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + /* Since all our fields are integer types, use memset here so the + * comparison function can rely on memcmp because the padding will be + * zero. 
*/ + memset(params, 0, sizeof(*params)); + + params->crtc.pipe = crtc->pipe; + params->crtc.plane = crtc->plane; + params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); + + params->fb.pixel_format = cache->fb.pixel_format; + params->fb.stride = cache->fb.stride; + params->fb.fence_reg = cache->fb.fence_reg; + + params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); + + params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset; +} + +static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, + struct intel_fbc_reg_params *params2) +{ + /* We can use this since intel_fbc_get_reg_params() does a memset. */ + return memcmp(params1, params2, sizeof(*params1)) == 0; +} + +void intel_fbc_pre_update(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!fbc_supported(dev_priv)) + return; + + mutex_lock(&fbc->lock); + + if (!multiple_pipes_ok(crtc)) { + fbc->no_fbc_reason = "more than one pipe active"; + goto deactivate; + } + + if (!fbc->enabled || fbc->crtc != crtc) + goto unlock; + + intel_fbc_update_state_cache(crtc); + +deactivate: + intel_fbc_deactivate(dev_priv); +unlock: + mutex_unlock(&fbc->lock); +} + +static void __intel_fbc_post_update(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_reg_params old_params; + + WARN_ON(!mutex_is_locked(&fbc->lock)); + + if (!fbc->enabled || fbc->crtc != crtc) + return; + + if (!intel_fbc_can_activate(crtc)) { + WARN_ON(fbc->active); + return; + } + + old_params = fbc->params; + intel_fbc_get_reg_params(crtc, &fbc->params); + /* If the scanout has not changed, don't modify the FBC settings. * Note that we make the fundamental assumption that the fb->obj * cannot be unpinned (and have its GTT offset and fence revoked) * without first being decoupled from the scanout and FBC disabled. */ - if (dev_priv->fbc.crtc == crtc && - dev_priv->fbc.fb_id == fb->base.id && - dev_priv->fbc.y == crtc->base.y && - dev_priv->fbc.active) + if (fbc->active && + intel_fbc_reg_params_equal(&old_params, &fbc->params)) return; - if (intel_fbc_is_active(dev_priv)) { - /* We update FBC along two paths, after changing fb/crtc - * configuration (modeswitching) and after page-flipping - * finishes. For the latter, we know that not only did - * we disable the FBC at the start of the page-flip - * sequence, but also more than one vblank has passed. - * - * For the former case of modeswitching, it is possible - * to switch between two FBC valid configurations - * instantaneously so we do need to disable the FBC - * before we can modify its control registers. We also - * have to wait for the next vblank for that to take - * effect. However, since we delay enabling FBC we can - * assume that a vblank has passed since disabling and - * that we can safely alter the registers in the deferred - * callback. - * - * In the scenario that we go from a valid to invalid - * and then back to valid FBC configuration we have - * no strict enforcement that a vblank occurred since - * disabling the FBC. However, along all current pipe - * disabling paths we do need to wait for a vblank at - * some point. And we wait before enabling FBC anyway. 
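
The memset-before-fill comment above is worth spelling out, since it is what makes intel_fbc_reg_params_equal() legal: zeroing the whole struct first guarantees that compiler-inserted padding bytes compare equal, so memcmp() becomes a valid whole-struct equality test. A self-contained illustration with a stand-in struct (not the driver's real layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_params {
	uint8_t plane;		/* padding typically follows this member */
	uint32_t stride;
	uint32_t cfb_size;
};

static void fill_params(struct demo_params *p, uint8_t plane,
			uint32_t stride, uint32_t cfb_size)
{
	memset(p, 0, sizeof(*p));	/* zero the padding before assigning fields */
	p->plane = plane;
	p->stride = stride;
	p->cfb_size = cfb_size;
}

static bool params_equal(const struct demo_params *a,
			 const struct demo_params *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

int main(void)
{
	struct demo_params a, b;

	fill_params(&a, 0, 4096, 1u << 20);
	fill_params(&b, 0, 4096, 1u << 20);
	printf("equal: %s\n", params_equal(&a, &b) ? "yes" : "no");
	return 0;
}

Without the memset, two logically identical snapshots could still differ in their padding bytes and defeat the comparison.
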
- */ - DRM_DEBUG_KMS("deactivating FBC for update\n"); - __intel_fbc_deactivate(dev_priv); - } - + intel_fbc_deactivate(dev_priv); intel_fbc_schedule_activation(crtc); - dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)"; - return; - -out_disable: - /* Multiple disables should be harmless */ - if (intel_fbc_is_active(dev_priv)) { - DRM_DEBUG_KMS("unsupported config, deactivating FBC\n"); - __intel_fbc_deactivate(dev_priv); - } + fbc->no_fbc_reason = "FBC enabled (active or scheduled)"; } -/* - * intel_fbc_update - activate/deactivate FBC as needed - * @crtc: the CRTC that triggered the update - * - * This function reevaluates the overall state and activates or deactivates FBC. - */ -void intel_fbc_update(struct intel_crtc *crtc) +void intel_fbc_post_update(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) return; - mutex_lock(&dev_priv->fbc.lock); - __intel_fbc_update(crtc); - mutex_unlock(&dev_priv->fbc.lock); + mutex_lock(&fbc->lock); + __intel_fbc_post_update(crtc); + mutex_unlock(&fbc->lock); +} + +static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) +{ + if (fbc->enabled) + return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; + else + return fbc->possible_framebuffer_bits; } void intel_fbc_invalidate(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits, enum fb_op_origin origin) { - unsigned int fbc_bits; + struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) return; - if (origin == ORIGIN_GTT) + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) return; - mutex_lock(&dev_priv->fbc.lock); + mutex_lock(&fbc->lock); - if (dev_priv->fbc.enabled) - fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe); - else - fbc_bits = dev_priv->fbc.possible_framebuffer_bits; - - dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits); + fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits; - if (dev_priv->fbc.busy_bits) - __intel_fbc_deactivate(dev_priv); + if (fbc->enabled && fbc->busy_bits) + intel_fbc_deactivate(dev_priv); - mutex_unlock(&dev_priv->fbc.lock); + mutex_unlock(&fbc->lock); } void intel_fbc_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits, enum fb_op_origin origin) { + struct intel_fbc *fbc = &dev_priv->fbc; + if (!fbc_supported(dev_priv)) return; - if (origin == ORIGIN_GTT) + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) return; - mutex_lock(&dev_priv->fbc.lock); + mutex_lock(&fbc->lock); - dev_priv->fbc.busy_bits &= ~frontbuffer_bits; + fbc->busy_bits &= ~frontbuffer_bits; - if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) { - if (origin != ORIGIN_FLIP && dev_priv->fbc.active) { + if (!fbc->busy_bits && fbc->enabled && + (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { + if (fbc->active) intel_fbc_recompress(dev_priv); - } else { - __intel_fbc_deactivate(dev_priv); - __intel_fbc_update(dev_priv->fbc.crtc); + else + __intel_fbc_post_update(fbc->crtc); + } + + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_choose_crtc - select a CRTC to enable FBC on + * @dev_priv: i915 device instance + * @state: the atomic state structure + * + * This function looks at the proposed state for CRTCs and planes, then chooses + * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to + * true. + * + * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe + * enable FBC for the chosen CRTC. 
If it does, it will set dev_priv->fbc.crtc. + */ +void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, + struct drm_atomic_state *state) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + bool fbc_crtc_present = false; + int i, j; + + mutex_lock(&fbc->lock); + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + if (fbc->crtc == to_intel_crtc(crtc)) { + fbc_crtc_present = true; + break; } } + /* This atomic commit doesn't involve the CRTC currently tied to FBC. */ + if (!fbc_crtc_present && fbc->crtc != NULL) + goto out; + + /* Simply choose the first CRTC that is compatible and has a visible + * plane. We could go for fancier schemes such as checking the plane + * size, but this would just affect the few platforms that don't tie FBC + * to pipe or plane A. */ + for_each_plane_in_state(state, plane, plane_state, i) { + struct intel_plane_state *intel_plane_state = + to_intel_plane_state(plane_state); + + if (!intel_plane_state->visible) + continue; - mutex_unlock(&dev_priv->fbc.lock); + for_each_crtc_in_state(state, crtc, crtc_state, j) { + struct intel_crtc_state *intel_crtc_state = + to_intel_crtc_state(crtc_state); + + if (plane_state->crtc != crtc) + continue; + + if (!intel_fbc_can_choose(to_intel_crtc(crtc))) + break; + + intel_crtc_state->enable_fbc = true; + goto out; + } + } + +out: + mutex_unlock(&fbc->lock); } /** * intel_fbc_enable: tries to enable FBC on the CRTC * @crtc: the CRTC * - * This function checks if it's possible to enable FBC on the following CRTC, - * then enables it. Notice that it doesn't activate FBC. + * This function checks if the given CRTC was chosen for FBC, then enables it if + * possible. Notice that it doesn't activate FBC. It is valid to call + * intel_fbc_enable multiple times for the same pipe without an + * intel_fbc_disable in the middle, as long as it is deactivated. 
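
The selection done by intel_fbc_choose_crtc() reduces to "first visible plane whose CRTC is eligible wins"; the toy model below captures that shape, with eligibility collapsed to a boolean standing in for intel_fbc_can_choose() and none of the atomic-state plumbing.

#include <stdbool.h>
#include <stdio.h>

struct toy_crtc { bool can_have_fbc; bool enable_fbc; };
struct toy_plane { bool visible; struct toy_crtc *crtc; };

static void choose_fbc_crtc(struct toy_plane *planes, int n)
{
	for (int i = 0; i < n; i++) {
		if (!planes[i].visible || !planes[i].crtc)
			continue;
		if (!planes[i].crtc->can_have_fbc)
			continue;
		planes[i].crtc->enable_fbc = true;	/* only one CRTC gets FBC */
		return;
	}
}

int main(void)
{
	struct toy_crtc pipe_a = { .can_have_fbc = true };
	struct toy_crtc pipe_b = { .can_have_fbc = false };
	struct toy_plane planes[] = {
		{ .visible = false, .crtc = &pipe_a },	/* skipped: not visible */
		{ .visible = true,  .crtc = &pipe_b },	/* skipped: not eligible */
		{ .visible = true,  .crtc = &pipe_a },	/* chosen */
	};

	choose_fbc_crtc(planes, 3);
	printf("pipe A: %d, pipe B: %d\n", pipe_a.enable_fbc, pipe_b.enable_fbc);
	return 0;
}
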
*/ void intel_fbc_enable(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) return; - mutex_lock(&dev_priv->fbc.lock); + mutex_lock(&fbc->lock); - if (dev_priv->fbc.enabled) { - WARN_ON(dev_priv->fbc.crtc == crtc); - goto out; - } - - WARN_ON(dev_priv->fbc.active); - WARN_ON(dev_priv->fbc.crtc != NULL); - - if (intel_vgpu_active(dev_priv->dev)) { - set_no_fbc_reason(dev_priv, "VGPU is active"); - goto out; - } - - if (i915.enable_fbc < 0) { - set_no_fbc_reason(dev_priv, "disabled per chip default"); + if (fbc->enabled) { + WARN_ON(fbc->crtc == NULL); + if (fbc->crtc == crtc) { + WARN_ON(!crtc->config->enable_fbc); + WARN_ON(fbc->active); + } goto out; } - if (!i915.enable_fbc) { - set_no_fbc_reason(dev_priv, "disabled per module param"); + if (!crtc->config->enable_fbc) goto out; - } - if (!crtc_can_fbc(crtc)) { - set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC"); - goto out; - } + WARN_ON(fbc->active); + WARN_ON(fbc->crtc != NULL); + intel_fbc_update_state_cache(crtc); if (intel_fbc_alloc_cfb(crtc)) { - set_no_fbc_reason(dev_priv, "not enough stolen memory"); + fbc->no_fbc_reason = "not enough stolen memory"; goto out; } DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); - dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet\n"; + fbc->no_fbc_reason = "FBC enabled but not active yet\n"; - dev_priv->fbc.enabled = true; - dev_priv->fbc.crtc = crtc; + fbc->enabled = true; + fbc->crtc = crtc; out: - mutex_unlock(&dev_priv->fbc.lock); + mutex_unlock(&fbc->lock); } /** @@ -1013,58 +1135,88 @@ out: */ static void __intel_fbc_disable(struct drm_i915_private *dev_priv) { - struct intel_crtc *crtc = dev_priv->fbc.crtc; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_crtc *crtc = fbc->crtc; - WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); - WARN_ON(!dev_priv->fbc.enabled); - WARN_ON(dev_priv->fbc.active); - assert_pipe_disabled(dev_priv, crtc->pipe); + WARN_ON(!mutex_is_locked(&fbc->lock)); + WARN_ON(!fbc->enabled); + WARN_ON(fbc->active); + WARN_ON(crtc->active); DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe)); __intel_fbc_cleanup_cfb(dev_priv); - dev_priv->fbc.enabled = false; - dev_priv->fbc.crtc = NULL; + fbc->enabled = false; + fbc->crtc = NULL; } /** - * intel_fbc_disable_crtc - disable FBC if it's associated with crtc + * intel_fbc_disable - disable FBC if it's associated with crtc * @crtc: the CRTC * * This function disables FBC if it's associated with the provided CRTC. */ -void intel_fbc_disable_crtc(struct intel_crtc *crtc) +void intel_fbc_disable(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) return; - mutex_lock(&dev_priv->fbc.lock); - if (dev_priv->fbc.crtc == crtc) { - WARN_ON(!dev_priv->fbc.enabled); - WARN_ON(dev_priv->fbc.active); + mutex_lock(&fbc->lock); + if (fbc->crtc == crtc) { + WARN_ON(!fbc->enabled); + WARN_ON(fbc->active); __intel_fbc_disable(dev_priv); } - mutex_unlock(&dev_priv->fbc.lock); + mutex_unlock(&fbc->lock); + + cancel_work_sync(&fbc->work.work); } /** - * intel_fbc_disable - globally disable FBC + * intel_fbc_global_disable - globally disable FBC * @dev_priv: i915 device instance * * This function disables FBC regardless of which CRTC is associated with it. 
*/ -void intel_fbc_disable(struct drm_i915_private *dev_priv) +void intel_fbc_global_disable(struct drm_i915_private *dev_priv) { + struct intel_fbc *fbc = &dev_priv->fbc; + if (!fbc_supported(dev_priv)) return; - mutex_lock(&dev_priv->fbc.lock); - if (dev_priv->fbc.enabled) + mutex_lock(&fbc->lock); + if (fbc->enabled) __intel_fbc_disable(dev_priv); - mutex_unlock(&dev_priv->fbc.lock); + mutex_unlock(&fbc->lock); + + cancel_work_sync(&fbc->work.work); +} + +/** + * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking + * @dev_priv: i915 device instance + * + * The FBC code needs to track CRTC visibility since the older platforms can't + * have FBC enabled while multiple pipes are used. This function does the + * initial setup at driver load to make sure FBC is matching the real hardware. + */ +void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; + + /* Don't even bother tracking anything if we don't need. */ + if (!no_fbc_on_multiple_pipes(dev_priv)) + return; + + for_each_intel_crtc(dev_priv->dev, crtc) + if (intel_crtc_active(&crtc->base) && + to_intel_plane_state(crtc->base.primary->state)->visible) + dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe); } /** @@ -1075,51 +1227,35 @@ void intel_fbc_disable(struct drm_i915_private *dev_priv) */ void intel_fbc_init(struct drm_i915_private *dev_priv) { + struct intel_fbc *fbc = &dev_priv->fbc; enum pipe pipe; - INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn); - mutex_init(&dev_priv->fbc.lock); - dev_priv->fbc.enabled = false; - dev_priv->fbc.active = false; - dev_priv->fbc.work.scheduled = false; + INIT_WORK(&fbc->work.work, intel_fbc_work_fn); + mutex_init(&fbc->lock); + fbc->enabled = false; + fbc->active = false; + fbc->work.scheduled = false; if (!HAS_FBC(dev_priv)) { - dev_priv->fbc.no_fbc_reason = "unsupported by this chipset"; + fbc->no_fbc_reason = "unsupported by this chipset"; return; } for_each_pipe(dev_priv, pipe) { - dev_priv->fbc.possible_framebuffer_bits |= + fbc->possible_framebuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(pipe); if (fbc_on_pipe_a_only(dev_priv)) break; } - if (INTEL_INFO(dev_priv)->gen >= 7) { - dev_priv->fbc.is_active = ilk_fbc_is_active; - dev_priv->fbc.activate = gen7_fbc_activate; - dev_priv->fbc.deactivate = ilk_fbc_deactivate; - } else if (INTEL_INFO(dev_priv)->gen >= 5) { - dev_priv->fbc.is_active = ilk_fbc_is_active; - dev_priv->fbc.activate = ilk_fbc_activate; - dev_priv->fbc.deactivate = ilk_fbc_deactivate; - } else if (IS_GM45(dev_priv)) { - dev_priv->fbc.is_active = g4x_fbc_is_active; - dev_priv->fbc.activate = g4x_fbc_activate; - dev_priv->fbc.deactivate = g4x_fbc_deactivate; - } else { - dev_priv->fbc.is_active = i8xx_fbc_is_active; - dev_priv->fbc.activate = i8xx_fbc_activate; - dev_priv->fbc.deactivate = i8xx_fbc_deactivate; - - /* This value was pulled out of someone's hat */ + /* This value was pulled out of someone's hat */ + if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv)) I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); - } /* We still don't have any sort of hardware state readout for FBC, so * deactivate it in case the BIOS activated it to make sure software * matches the hardware state. 
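
Both multiple_pipes_ok() and intel_fbc_init_pipe_state() revolve around the same visible_pipes_mask bookkeeping: each pipe sets or clears its bit as its primary plane becomes visible, and the interesting query is whether any other pipe currently has a bit set. A standalone sketch follows; pipe names and count are illustrative.

#include <stdbool.h>
#include <stdio.h>

enum { PIPE_A, PIPE_B, PIPE_C };

static unsigned int visible_pipes_mask;

static void update_pipe_visibility(int pipe, bool visible)
{
	if (visible)
		visible_pipes_mask |= 1u << pipe;
	else
		visible_pipes_mask &= ~(1u << pipe);
}

static bool other_pipes_visible(int pipe)
{
	/* same expression as above: the mask with our own bit removed */
	return (visible_pipes_mask & ~(1u << pipe)) != 0;
}

int main(void)
{
	update_pipe_visibility(PIPE_A, true);
	printf("others visible besides A: %d\n", other_pipes_visible(PIPE_A)); /* 0 */

	update_pipe_visibility(PIPE_B, true);
	printf("others visible besides A: %d\n", other_pipes_visible(PIPE_A)); /* 1 */
	return 0;
}

At driver load, intel_fbc_init_pipe_state() seeds the same mask from the CRTCs that are already active, so the software tracking matches the hardware before the first atomic commit.
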
*/ - if (dev_priv->fbc.is_active(dev_priv)) - dev_priv->fbc.deactivate(dev_priv); + if (intel_fbc_hw_is_active(dev_priv)) + intel_fbc_hw_deactivate(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index bea75cafc623..09840f4380f9 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -119,7 +119,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, { struct intel_fbdev *ifbdev = container_of(helper, struct intel_fbdev, helper); - struct drm_framebuffer *fb = NULL; + struct drm_framebuffer *fb; struct drm_device *dev = helper->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_mode_fb_cmd2 mode_cmd = {}; @@ -171,8 +171,6 @@ static int intelfb_alloc(struct drm_fb_helper *helper, out: mutex_unlock(&dev->struct_mutex); - if (!IS_ERR_OR_NULL(fb)) - drm_framebuffer_unreference(fb); return ret; } diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 822952235dcf..73002e901ff2 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -43,9 +43,10 @@ struct i915_guc_client { uint32_t wq_offset; uint32_t wq_size; uint32_t wq_tail; + uint32_t wq_head; /* GuC submission statistics & status */ - uint64_t submissions[I915_NUM_RINGS]; + uint64_t submissions[GUC_MAX_ENGINES_NUM]; uint32_t q_fail; uint32_t b_fail; int retcode; @@ -88,6 +89,8 @@ struct intel_guc { uint32_t log_flags; struct drm_i915_gem_object *log_obj; + struct drm_i915_gem_object *ads_obj; + struct drm_i915_gem_object *ctx_pool_obj; struct ida ctx_ids; @@ -103,8 +106,8 @@ struct intel_guc { uint32_t action_fail; /* Total number of failures */ int32_t action_err; /* Last error code */ - uint64_t submissions[I915_NUM_RINGS]; - uint32_t last_seqno[I915_NUM_RINGS]; + uint64_t submissions[GUC_MAX_ENGINES_NUM]; + uint32_t last_seqno[GUC_MAX_ENGINES_NUM]; }; /* intel_guc_loader.c */ @@ -122,5 +125,6 @@ int i915_guc_submit(struct i915_guc_client *client, struct drm_i915_gem_request *rq); void i915_guc_submission_disable(struct drm_device *dev); void i915_guc_submission_fini(struct drm_device *dev); +int i915_guc_wq_check_space(struct i915_guc_client *client); #endif diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 40b2ea572e16..2de57ffe5e18 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -39,10 +39,18 @@ #define GUC_CTX_PRIORITY_HIGH 1 #define GUC_CTX_PRIORITY_KMD_NORMAL 2 #define GUC_CTX_PRIORITY_NORMAL 3 +#define GUC_CTX_PRIORITY_NUM 4 #define GUC_MAX_GPU_CONTEXTS 1024 #define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS +#define GUC_RENDER_ENGINE 0 +#define GUC_VIDEO_ENGINE 1 +#define GUC_BLITTER_ENGINE 2 +#define GUC_VIDEOENHANCE_ENGINE 3 +#define GUC_VIDEO_ENGINE2 4 +#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) + /* Work queue item header definitions */ #define WQ_STATUS_ACTIVE 1 #define WQ_STATUS_SUSPENDED 2 @@ -81,11 +89,14 @@ #define GUC_CTL_CTXINFO 0 #define GUC_CTL_CTXNUM_IN16_SHIFT 0 #define GUC_CTL_BASE_ADDR_SHIFT 12 + #define GUC_CTL_ARAT_HIGH 1 #define GUC_CTL_ARAT_LOW 2 + #define GUC_CTL_DEVICE_INFO 3 #define GUC_CTL_GTTYPE_SHIFT 0 #define GUC_CTL_COREFAMILY_SHIFT 7 + #define GUC_CTL_LOG_PARAMS 4 #define GUC_LOG_VALID (1 << 0) #define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1) @@ -97,9 +108,12 @@ #define GUC_LOG_ISR_PAGES 3 #define GUC_LOG_ISR_SHIFT 9 #define GUC_LOG_BUF_ADDR_SHIFT 12 + #define GUC_CTL_PAGE_FAULT_CONTROL 5 + #define GUC_CTL_WA 6 #define 
GUC_CTL_WA_UK_BY_DRIVER (1 << 3) + #define GUC_CTL_FEATURE 7 #define GUC_CTL_VCS2_ENABLED (1 << 0) #define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1) @@ -109,6 +123,7 @@ #define GUC_CTL_PREEMPTION_LOG (1 << 5) #define GUC_CTL_ENABLE_SLPC (1 << 7) #define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8) + #define GUC_CTL_DEBUG 8 #define GUC_LOG_VERBOSITY_SHIFT 0 #define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT) @@ -118,9 +133,19 @@ /* Verbosity range-check limits, without the shift */ #define GUC_LOG_VERBOSITY_MIN 0 #define GUC_LOG_VERBOSITY_MAX 3 +#define GUC_LOG_VERBOSITY_MASK 0x0000000f +#define GUC_LOG_DESTINATION_MASK (3 << 4) +#define GUC_LOG_DISABLED (1 << 6) +#define GUC_PROFILE_ENABLED (1 << 7) +#define GUC_WQ_TRACK_ENABLED (1 << 8) +#define GUC_ADS_ENABLED (1 << 9) +#define GUC_DEBUG_RESERVED (1 << 10) +#define GUC_ADS_ADDR_SHIFT 11 +#define GUC_ADS_ADDR_MASK 0xfffff800 + #define GUC_CTL_RSRVD 9 -#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1) +#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */ /** * DOC: GuC Firmware Layout @@ -267,7 +292,7 @@ struct guc_context_desc { u64 db_trigger_phy; u16 db_id; - struct guc_execlist_context lrc[I915_NUM_RINGS]; + struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM]; u8 attribute; @@ -299,6 +324,99 @@ struct guc_context_desc { #define GUC_POWER_D2 3 #define GUC_POWER_D3 4 +/* Scheduling policy settings */ + +/* Reset engine upon preempt failure */ +#define POLICY_RESET_ENGINE (1<<0) +/* Preempt to idle on quantum expiry */ +#define POLICY_PREEMPT_TO_IDLE (1<<1) + +#define POLICY_MAX_NUM_WI 15 + +struct guc_policy { + /* Time for one workload to execute. (in micro seconds) */ + u32 execution_quantum; + u32 reserved1; + + /* Time to wait for a preemption request to completed before issuing a + * reset. (in micro seconds). */ + u32 preemption_time; + + /* How much time to allow to run after the first fault is observed. + * Then preempt afterwards. (in micro seconds) */ + u32 fault_time; + + u32 policy_flags; + u32 reserved[2]; +} __packed; + +struct guc_policies { + struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM]; + + /* In micro seconds. How much time to allow before DPC processing is + * called back via interrupt (to prevent DPC queue drain starving). + * Typically 1000s of micro seconds (example only, not granularity). */ + u32 dpc_promote_time; + + /* Must be set to take these new values. */ + u32 is_valid; + + /* Max number of WIs to process per call. A large value may keep CS + * idle. 
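Illustrative aside, not part of the patch: struct guc_policies above is a fixed-size table indexed by context priority and engine, so each (priority, engine) pair carries its own quantum, preemption timeout and flags. A hedged sketch of how such a table might be laid out and filled with one default policy; the field values, helper and table dimensions below are assumptions for illustration only, not what the driver programs into the GuC:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PRIORITY_NUM 4     /* assumed GUC_CTX_PRIORITY_NUM */
#define ENGINES_NUM  5     /* assumed GUC_MAX_ENGINES_NUM */

struct policy {
        uint32_t execution_quantum;   /* microseconds */
        uint32_t preemption_time;     /* microseconds */
        uint32_t fault_time;          /* microseconds */
        uint32_t policy_flags;
};

struct policies {
        struct policy policy[PRIORITY_NUM][ENGINES_NUM];
        uint32_t dpc_promote_time;
        uint32_t is_valid;
        uint32_t max_num_work_items;
};

int main(void)
{
        struct policies p;
        int prio, engine;

        memset(&p, 0, sizeof(p));
        for (prio = 0; prio < PRIORITY_NUM; prio++)
                for (engine = 0; engine < ENGINES_NUM; engine++) {
                        /* invented defaults, one per (priority, engine) slot */
                        p.policy[prio][engine].execution_quantum = 1000000;
                        p.policy[prio][engine].preemption_time = 500000;
                }
        p.dpc_promote_time = 500000;
        p.max_num_work_items = 15;
        p.is_valid = 1;              /* must be set for the table to take effect */

        printf("table size: %zu bytes (%d x %d policies)\n",
               sizeof(p.policy), PRIORITY_NUM, ENGINES_NUM);
        return 0;
}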
*/ + u32 max_num_work_items; + + u32 reserved[19]; +} __packed; + +/* GuC MMIO reg state struct */ + +#define GUC_REGSET_FLAGS_NONE 0x0 +#define GUC_REGSET_POWERCYCLE 0x1 +#define GUC_REGSET_MASKED 0x2 +#define GUC_REGSET_ENGINERESET 0x4 +#define GUC_REGSET_SAVE_DEFAULT_VALUE 0x8 +#define GUC_REGSET_SAVE_CURRENT_VALUE 0x10 + +#define GUC_REGSET_MAX_REGISTERS 25 +#define GUC_MMIO_WHITE_LIST_START 0x24d0 +#define GUC_MMIO_WHITE_LIST_MAX 12 +#define GUC_S3_SAVE_SPACE_PAGES 10 + +struct guc_mmio_regset { + struct __packed { + u32 offset; + u32 value; + u32 flags; + } registers[GUC_REGSET_MAX_REGISTERS]; + + u32 values_valid; + u32 number_of_registers; +} __packed; + +struct guc_mmio_reg_state { + struct guc_mmio_regset global_reg; + struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM]; + + /* MMIO registers that are set as non privileged */ + struct __packed { + u32 mmio_start; + u32 offsets[GUC_MMIO_WHITE_LIST_MAX]; + u32 count; + } mmio_white_list[GUC_MAX_ENGINES_NUM]; +} __packed; + +/* GuC Additional Data Struct */ + +struct guc_ads { + u32 reg_state_addr; + u32 reg_state_buffer; + u32 golden_context_lrca; + u32 scheduler_policies; + u32 reserved0[3]; + u32 eng_state_size[GUC_MAX_ENGINES_NUM]; + u32 reserved2[4]; +} __packed; + /* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */ enum host2guc_action { HOST2GUC_ACTION_DEFAULT = 0x0, diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 550921f2ef7d..3accd914490f 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -165,6 +165,13 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv) i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT; } + if (guc->ads_obj) { + u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj) + >> PAGE_SHIFT; + params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT; + params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED; + } + /* If GuC submission is enabled, set up additional parameters here */ if (i915.enable_guc_submission) { u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj); @@ -438,6 +445,7 @@ fail: direct_interrupts_to_host(dev_priv); i915_guc_submission_disable(dev); + i915_guc_submission_fini(dev); return err; } @@ -554,10 +562,12 @@ fail: DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n", guc_fw->guc_fw_path, err); + mutex_lock(&dev->struct_mutex); obj = guc_fw->guc_fw_obj; if (obj) drm_gem_object_unreference(&obj->base); guc_fw->guc_fw_obj = NULL; + mutex_unlock(&dev->struct_mutex); release_firmware(fw); /* OK even if fw is NULL */ guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL; @@ -624,10 +634,11 @@ void intel_guc_ucode_fini(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; + mutex_lock(&dev->struct_mutex); direct_interrupts_to_host(dev_priv); + i915_guc_submission_disable(dev); i915_guc_submission_fini(dev); - mutex_lock(&dev->struct_mutex); if (guc_fw->guc_fw_obj) drm_gem_object_unreference(&guc_fw->guc_fw_obj->base); guc_fw->guc_fw_obj = NULL; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 4a77639a489d..edb7e901ba4a 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1202,11 +1202,19 @@ intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_device *dev = intel_hdmi_to_dev(hdmi); enum drm_mode_status status; int clock; + int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; if (mode->flags & 
DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; clock = mode->clock; + + if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) + clock *= 2; + + if (clock > max_dotclk) + return MODE_CLOCK_HIGH; + if (mode->flags & DRM_MODE_FLAG_DBLCLK) clock *= 2; @@ -2033,6 +2041,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, enum port port = intel_dig_port->port; uint8_t alternate_ddc_pin; + if (WARN(intel_dig_port->max_lanes < 4, + "Not enough lanes (%d) for HDMI on port %c\n", + intel_dig_port->max_lanes, port_name(port))) + return; + drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); @@ -2218,6 +2231,7 @@ void intel_hdmi_init(struct drm_device *dev, dev_priv->dig_port_map[port] = intel_encoder; intel_dig_port->hdmi.hdmi_reg = hdmi_reg; intel_dig_port->dp.output_reg = INVALID_MMIO_REG; + intel_dig_port->max_lanes = 4; intel_hdmi_init_connector(intel_dig_port, intel_connector); } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 25254b5c1ac5..deb8282c26d8 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -683,7 +683,7 @@ int intel_setup_gmbus(struct drm_device *dev) return 0; err: - while (--pin) { + while (pin--) { if (!intel_gmbus_is_valid_pin(dev_priv, pin)) continue; diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index f1fa756c5d5d..3a03646e343d 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -225,7 +225,8 @@ enum { #define GEN8_CTX_ID_SHIFT 32 #define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 -static int intel_lr_context_pin(struct drm_i915_gem_request *rq); +static int intel_lr_context_pin(struct intel_context *ctx, + struct intel_engine_cs *engine); static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, struct drm_i915_gem_object *default_ctx_obj); @@ -263,65 +264,92 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists return 0; } +static void +logical_ring_init_platform_invariants(struct intel_engine_cs *ring) +{ + struct drm_device *dev = ring->dev; + + ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && + (ring->id == VCS || ring->id == VCS2); + + ring->ctx_desc_template = GEN8_CTX_VALID; + ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << + GEN8_CTX_ADDRESSING_MODE_SHIFT; + if (IS_GEN8(dev)) + ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; + ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE; + + /* TODO: WaDisableLiteRestore when we start using semaphore + * signalling between Command Streamers */ + /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */ + + /* WaEnableForceRestoreInCtxtDescForVCS:skl */ + /* WaEnableForceRestoreInCtxtDescForVCS:bxt */ + if (ring->disable_lite_restore_wa) + ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; +} + /** - * intel_execlists_ctx_id() - get the Execlists Context ID - * @ctx_obj: Logical Ring Context backing object. + * intel_lr_context_descriptor_update() - calculate & cache the descriptor + * descriptor for a pinned context * - * Do not confuse with ctx->id! Unfortunately we have a name overload - * here: the old context ID we pass to userspace as a handler so that - * they can refer to a context, and the new context ID we pass to the - * ELSP so that the GPU can inform us of the context status via - * interrupts. 
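Illustrative aside, not part of the patch: the intel_setup_gmbus() unwind fix above (while (--pin) becomes while (pin--)) is a classic off-by-one, since the pre-decrement form never visits index 0 and so never tears down the first successfully initialised bus. A tiny standalone demonstration of the difference, unrelated to any real gmbus state:

#include <stdio.h>

int main(void)
{
        unsigned int pin;

        /* Buggy unwind: starting at pin == 3 this visits 2 and 1, never 0. */
        pin = 3;
        printf("while (--pin):");
        while (--pin)
                printf(" %u", pin);
        printf("\n");

        /* Fixed unwind: visits 2, 1 and 0, cleaning up every bus. */
        pin = 3;
        printf("while (pin--):");
        while (pin--)
                printf(" %u", pin);
        printf("\n");

        return 0;
}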
+ * @ctx: Context to work on + * @ring: Engine the descriptor will be used with * - * Return: 20-bits globally unique context ID. + * The context descriptor encodes various attributes of a context, + * including its GTT address and some flags. Because it's fairly + * expensive to calculate, we'll just do it once and cache the result, + * which remains valid until the context is unpinned. + * + * This is what a descriptor looks like, from LSB to MSB: + * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) + * bits 12-31: LRCA, GTT address of (the HWSP of) this context + * bits 32-51: ctx ID, a globally unique tag (the LRCA again!) + * bits 52-63: reserved, may encode the engine ID (for GuC) */ -u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj) +static void +intel_lr_context_descriptor_update(struct intel_context *ctx, + struct intel_engine_cs *ring) { - u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) + - LRC_PPHWSP_PN * PAGE_SIZE; + uint64_t lrca, desc; - /* LRCA is required to be 4K aligned so the more significant 20 bits - * are globally unique */ - return lrca >> 12; -} + lrca = ctx->engine[ring->id].lrc_vma->node.start + + LRC_PPHWSP_PN * PAGE_SIZE; -static bool disable_lite_restore_wa(struct intel_engine_cs *ring) -{ - struct drm_device *dev = ring->dev; + desc = ring->ctx_desc_template; /* bits 0-11 */ + desc |= lrca; /* bits 12-31 */ + desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */ - return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && - (ring->id == VCS || ring->id == VCS2); + ctx->engine[ring->id].lrc_desc = desc; } uint64_t intel_lr_context_descriptor(struct intel_context *ctx, struct intel_engine_cs *ring) { - struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; - uint64_t desc; - uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) + - LRC_PPHWSP_PN * PAGE_SIZE; - - WARN_ON(lrca & 0xFFFFFFFF00000FFFULL); - - desc = GEN8_CTX_VALID; - desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT; - if (IS_GEN8(ctx_obj->base.dev)) - desc |= GEN8_CTX_L3LLC_COHERENT; - desc |= GEN8_CTX_PRIVILEGE; - desc |= lrca; - desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT; - - /* TODO: WaDisableLiteRestore when we start using semaphore - * signalling between Command Streamers */ - /* desc |= GEN8_CTX_FORCE_RESTORE; */ - - /* WaEnableForceRestoreInCtxtDescForVCS:skl */ - /* WaEnableForceRestoreInCtxtDescForVCS:bxt */ - if (disable_lite_restore_wa(ring)) - desc |= GEN8_CTX_FORCE_RESTORE; + return ctx->engine[ring->id].lrc_desc; +} - return desc; +/** + * intel_execlists_ctx_id() - get the Execlists Context ID + * @ctx: Context to get the ID for + * @ring: Engine to get the ID for + * + * Do not confuse with ctx->id! Unfortunately we have a name overload + * here: the old context ID we pass to userspace as a handler so that + * they can refer to a context, and the new context ID we pass to the + * ELSP so that the GPU can inform us of the context status via + * interrupts. + * + * The context ID is a portion of the context descriptor, so we can + * just extract the required part from the cached descriptor. + * + * Return: 20-bits globally unique context ID. 
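Illustrative aside, not part of the patch: given the bit layout documented above, the cached descriptor is simply the engine's template OR'ed with the 4 KiB-aligned LRCA, with the same LRCA (shifted down by the page size) reused as the 20-bit context ID in the upper half. A standalone sketch of that packing with invented values; the 12-bit page shift and the 32-bit ID shift are taken from the comment and the surrounding defines:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT   12   /* assumed 4 KiB pages */
#define SKETCH_CTX_ID_SHIFT 32   /* matches GEN8_CTX_ID_SHIFT above */

static uint64_t pack_desc(uint64_t template, uint64_t lrca)
{
        uint64_t desc = template;                                   /* bits 0-11: flags */
        desc |= lrca;                                               /* bits 12-31: LRCA */
        desc |= (lrca >> SKETCH_PAGE_SHIFT) << SKETCH_CTX_ID_SHIFT; /* bits 32-51: ctx ID */
        return desc;
}

int main(void)
{
        uint64_t lrca = 0x0012f000;              /* invented, 4 KiB-aligned GGTT offset */
        uint64_t desc = pack_desc(0x021, lrca);  /* invented flag template */

        /* The ID handed to the ELSP is just the upper half of the descriptor. */
        printf("desc=0x%016llx ctx_id=0x%05llx\n",
               (unsigned long long)desc,
               (unsigned long long)(desc >> SKETCH_CTX_ID_SHIFT));
        return 0;
}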
+ */ +u32 intel_execlists_ctx_id(struct intel_context *ctx, + struct intel_engine_cs *ring) +{ + return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT; } static void execlists_elsp_write(struct drm_i915_gem_request *rq0, @@ -363,20 +391,9 @@ static int execlists_update_context(struct drm_i915_gem_request *rq) { struct intel_engine_cs *ring = rq->ring; struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; - struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; - struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj; - struct page *page; - uint32_t *reg_state; - - BUG_ON(!ctx_obj); - WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); - WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); - - page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); - reg_state = kmap_atomic(page); + uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state; reg_state[CTX_RING_TAIL+1] = rq->tail; - reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj); if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { /* True 32b PPGTT with dynamic page allocation: update PDP @@ -390,8 +407,6 @@ static int execlists_update_context(struct drm_i915_gem_request *rq) ASSIGN_CTX_PDP(ppgtt, reg_state, 0); } - kunmap_atomic(reg_state); - return 0; } @@ -431,9 +446,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring) /* Same ctx: ignore first request, as second request * will update tail past first request's workload */ cursor->elsp_submitted = req0->elsp_submitted; - list_del(&req0->execlist_link); - list_add_tail(&req0->execlist_link, - &ring->execlist_retired_req_list); + list_move_tail(&req0->execlist_link, + &ring->execlist_retired_req_list); req0 = cursor; } else { req1 = cursor; @@ -478,16 +492,13 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring, execlist_link); if (head_req != NULL) { - struct drm_i915_gem_object *ctx_obj = - head_req->ctx->engine[ring->id].state; - if (intel_execlists_ctx_id(ctx_obj) == request_id) { + if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) { WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); if (--head_req->elsp_submitted <= 0) { - list_del(&head_req->execlist_link); - list_add_tail(&head_req->execlist_link, - &ring->execlist_retired_req_list); + list_move_tail(&head_req->execlist_link, + &ring->execlist_retired_req_list); return true; } } @@ -496,6 +507,19 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring, return false; } +static void get_context_status(struct intel_engine_cs *ring, + u8 read_pointer, + u32 *status, u32 *context_id) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES)) + return; + + *status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer)); + *context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer)); +} + /** * intel_lrc_irq_handler() - handle Context Switch interrupts * @ring: Engine Command Streamer to handle. 
@@ -516,16 +540,16 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); read_pointer = ring->next_context_status_buffer; - write_pointer = status_pointer & GEN8_CSB_PTR_MASK; + write_pointer = GEN8_CSB_WRITE_PTR(status_pointer); if (read_pointer > write_pointer) write_pointer += GEN8_CSB_ENTRIES; spin_lock(&ring->execlist_lock); while (read_pointer < write_pointer) { - read_pointer++; - status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES)); - status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES)); + + get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES, + &status, &status_id); if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) continue; @@ -538,14 +562,14 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) WARN(1, "Preemption without Lite Restore\n"); } - if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) || - (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) { + if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) || + (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) { if (execlists_check_remove_request(ring, status_id)) submit_contexts++; } } - if (disable_lite_restore_wa(ring)) { + if (ring->disable_lite_restore_wa) { /* Prevent a ctx to preempt itself */ if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) && (submit_contexts != 0)) @@ -556,13 +580,16 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) spin_unlock(&ring->execlist_lock); - WARN(submit_contexts > 2, "More than two context complete events?\n"); + if (unlikely(submit_contexts > 2)) + DRM_ERROR("More than two context complete events?\n"); + ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; + /* Update the read pointer to the old write pointer. Manual ringbuffer + * management ftw </sarcasm> */ I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), - _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8, - ((u32)ring->next_context_status_buffer & - GEN8_CSB_PTR_MASK) << 8)); + _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, + ring->next_context_status_buffer << 8)); } static int execlists_context_queue(struct drm_i915_gem_request *request) @@ -571,8 +598,8 @@ static int execlists_context_queue(struct drm_i915_gem_request *request) struct drm_i915_gem_request *cursor; int num_elements = 0; - if (request->ctx != ring->default_context) - intel_lr_context_pin(request); + if (request->ctx != request->i915->kernel_context) + intel_lr_context_pin(request->ctx, ring); i915_gem_request_reference(request); @@ -592,9 +619,8 @@ static int execlists_context_queue(struct drm_i915_gem_request *request) if (request->ctx == tail_req->ctx) { WARN(tail_req->elsp_submitted != 0, "More than 2 already-submitted reqs queued\n"); - list_del(&tail_req->execlist_link); - list_add_tail(&tail_req->execlist_link, - &ring->execlist_retired_req_list); + list_move_tail(&tail_req->execlist_link, + &ring->execlist_retired_req_list); } } @@ -660,17 +686,27 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req, int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) { - int ret; + int ret = 0; request->ringbuf = request->ctx->engine[request->ring->id].ringbuf; - if (request->ctx != request->ring->default_context) { - ret = intel_lr_context_pin(request); + if (i915.enable_guc_submission) { + /* + * Check that the GuC has space for the request before + * going any further, as the i915_add_request() call + * later on mustn't fail ... 
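Illustrative aside, not part of the patch: the i915_guc_wq_check_space() call added above is a reserve-then-commit pattern, where work-queue space is verified while the request is still being assembled so that the later submission step cannot fail half-way. A minimal sketch of the underlying circular-buffer free-space test with invented head/tail/size values; the driver's real check is implemented elsewhere and is not shown in this hunk:

#include <stdio.h>

/* Free space in a circular work queue, keeping one slot unused so that
 * head == tail always means "empty" rather than "full". */
static unsigned int wq_space(unsigned int head, unsigned int tail, unsigned int size)
{
        return (head <= tail) ? size - (tail - head) - 1
                              : head - tail - 1;
}

int main(void)
{
        unsigned int size = 4096, head = 256, tail = 4000;
        unsigned int needed = 64;                 /* invented work-item size */
        unsigned int space = wq_space(head, tail, size);

        printf("free: %u bytes -> %s\n", space,
               space >= needed ? "submit" : "back off and retry");
        return 0;
}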
+ */ + struct intel_guc *guc = &request->i915->guc; + + ret = i915_guc_wq_check_space(guc->execbuf_client); if (ret) return ret; } - return 0; + if (request->ctx != request->i915->kernel_context) + ret = intel_lr_context_pin(request->ctx, request->ring); + + return ret; } static int logical_ring_wait_for_space(struct drm_i915_gem_request *req, @@ -724,23 +760,46 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req, * on a queue waiting for the ELSP to be ready to accept a new context submission. At that * point, the tail *inside* the context is updated and the ELSP written to. */ -static void +static int intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) { - struct intel_engine_cs *ring = request->ring; + struct intel_ringbuffer *ringbuf = request->ringbuf; struct drm_i915_private *dev_priv = request->i915; + struct intel_engine_cs *engine = request->ring; - intel_logical_ring_advance(request->ringbuf); + intel_logical_ring_advance(ringbuf); + request->tail = ringbuf->tail; - request->tail = request->ringbuf->tail; + /* + * Here we add two extra NOOPs as padding to avoid + * lite restore of a context with HEAD==TAIL. + * + * Caller must reserve WA_TAIL_DWORDS for us! + */ + intel_logical_ring_emit(ringbuf, MI_NOOP); + intel_logical_ring_emit(ringbuf, MI_NOOP); + intel_logical_ring_advance(ringbuf); - if (intel_ring_stopped(ring)) - return; + if (intel_ring_stopped(engine)) + return 0; + + if (engine->last_context != request->ctx) { + if (engine->last_context) + intel_lr_context_unpin(engine->last_context, engine); + if (request->ctx != request->i915->kernel_context) { + intel_lr_context_pin(request->ctx, engine); + engine->last_context = request->ctx; + } else { + engine->last_context = NULL; + } + } if (dev_priv->guc.execbuf_client) i915_guc_submit(dev_priv->guc.execbuf_client, request); else execlists_context_queue(request); + + return 0; } static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) @@ -967,8 +1026,9 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring) struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; - if (ctx_obj && (ctx != ring->default_context)) - intel_lr_context_unpin(req); + if (ctx_obj && (ctx != req->i915->kernel_context)) + intel_lr_context_unpin(ctx, ring); + list_del(&req->execlist_link); i915_gem_request_unreference(req); } @@ -1012,24 +1072,39 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) return 0; } -static int intel_lr_context_do_pin(struct intel_engine_cs *ring, - struct drm_i915_gem_object *ctx_obj, - struct intel_ringbuffer *ringbuf) +static int intel_lr_context_do_pin(struct intel_context *ctx, + struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; - int ret = 0; + struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; + struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; + struct page *lrc_state_page; + uint32_t *lrc_reg_state; + int ret; WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); + ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, PIN_OFFSET_BIAS | GUC_WOPCM_TOP); if (ret) return ret; + lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); + if (WARN_ON(!lrc_state_page)) { + ret = -ENODEV; + goto unpin_ctx_obj; + } + ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf); if (ret) goto unpin_ctx_obj; + ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); + intel_lr_context_descriptor_update(ctx, 
ring); + lrc_reg_state = kmap(lrc_state_page); + lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; + ctx->engine[ring->id].lrc_reg_state = lrc_reg_state; ctx_obj->dirty = true; /* Invalidate GuC TLB. */ @@ -1044,37 +1119,44 @@ unpin_ctx_obj: return ret; } -static int intel_lr_context_pin(struct drm_i915_gem_request *rq) +static int intel_lr_context_pin(struct intel_context *ctx, + struct intel_engine_cs *engine) { int ret = 0; - struct intel_engine_cs *ring = rq->ring; - struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; - struct intel_ringbuffer *ringbuf = rq->ringbuf; - if (rq->ctx->engine[ring->id].pin_count++ == 0) { - ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf); + if (ctx->engine[engine->id].pin_count++ == 0) { + ret = intel_lr_context_do_pin(ctx, engine); if (ret) goto reset_pin_count; + + i915_gem_context_reference(ctx); } return ret; reset_pin_count: - rq->ctx->engine[ring->id].pin_count = 0; + ctx->engine[engine->id].pin_count = 0; return ret; } -void intel_lr_context_unpin(struct drm_i915_gem_request *rq) +void intel_lr_context_unpin(struct intel_context *ctx, + struct intel_engine_cs *engine) { - struct intel_engine_cs *ring = rq->ring; - struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; - struct intel_ringbuffer *ringbuf = rq->ringbuf; + struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; - if (ctx_obj) { - WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); - if (--rq->ctx->engine[ring->id].pin_count == 0) { - intel_unpin_ringbuffer_obj(ringbuf); - i915_gem_object_ggtt_unpin(ctx_obj); - } + WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); + + if (WARN_ON_ONCE(!ctx_obj)) + return; + + if (--ctx->engine[engine->id].pin_count == 0) { + kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state)); + intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf); + i915_gem_object_ggtt_unpin(ctx_obj); + ctx->engine[engine->id].lrc_vma = NULL; + ctx->engine[engine->id].lrc_desc = 0; + ctx->engine[engine->id].lrc_reg_state = NULL; + + i915_gem_context_unreference(ctx); } } @@ -1087,7 +1169,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) struct drm_i915_private *dev_priv = dev->dev_private; struct i915_workarounds *w = &dev_priv->workarounds; - if (WARN_ON_ONCE(w->count == 0)) + if (w->count == 0) return 0; ring->gpu_caches_dirty = true; @@ -1474,7 +1556,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) u8 next_context_status_buffer_hw; lrc_setup_hardware_status_page(ring, - ring->default_context->engine[ring->id].state); + dev_priv->kernel_context->engine[ring->id].state); I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); @@ -1493,9 +1575,11 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) | * BDW | CSB regs not reset | CSB regs reset | * CHT | CSB regs not reset | CSB regs not reset | + * SKL | ? | ? | + * BXT | ? | ? 
| */ - next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring)) - & GEN8_CSB_PTR_MASK); + next_context_status_buffer_hw = + GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring))); /* * When the CSB registers are reset (also after power-up / gpu reset), @@ -1698,7 +1782,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, struct intel_ringbuffer *ringbuf = request->ringbuf; struct intel_engine_cs *ring = ringbuf->ring; u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; - bool vf_flush_wa; + bool vf_flush_wa = false; u32 flags = 0; int ret; @@ -1720,14 +1804,14 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; flags |= PIPE_CONTROL_QW_WRITE; flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; - } - /* - * On GEN9+ Before VF_CACHE_INVALIDATE we need to emit a NULL pipe - * control. - */ - vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 && - flags & PIPE_CONTROL_VF_CACHE_INVALIDATE; + /* + * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL + * pipe control. + */ + if (IS_GEN9(ring->dev)) + vf_flush_wa = true; + } ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6); if (ret) @@ -1791,44 +1875,65 @@ static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno) intel_flush_status_page(ring, I915_GEM_HWS_INDEX); } +/* + * Reserve space for 2 NOOPs at the end of each request to be + * used as a workaround for not being allowed to do lite + * restore with HEAD==TAIL (WaIdleLiteRestore). + */ +#define WA_TAIL_DWORDS 2 + +static inline u32 hws_seqno_address(struct intel_engine_cs *engine) +{ + return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR; +} + static int gen8_emit_request(struct drm_i915_gem_request *request) { struct intel_ringbuffer *ringbuf = request->ringbuf; - struct intel_engine_cs *ring = ringbuf->ring; - u32 cmd; int ret; - /* - * Reserve space for 2 NOOPs at the end of each request to be - * used as a workaround for not being allowed to do lite - * restore with HEAD==TAIL (WaIdleLiteRestore). - */ - ret = intel_logical_ring_begin(request, 8); + ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); if (ret) return ret; - cmd = MI_STORE_DWORD_IMM_GEN4; - cmd |= MI_GLOBAL_GTT; + /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */ + BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5)); - intel_logical_ring_emit(ringbuf, cmd); intel_logical_ring_emit(ringbuf, - (ring->status_page.gfx_addr + - (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT))); + (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); + intel_logical_ring_emit(ringbuf, + hws_seqno_address(request->ring) | + MI_FLUSH_DW_USE_GTT); intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance_and_submit(request); + return intel_logical_ring_advance_and_submit(request); +} - /* - * Here we add two extra NOOPs as padding to avoid - * lite restore of a context with HEAD==TAIL. 
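Illustrative aside, not part of the patch: with the tail padding moved into intel_logical_ring_advance_and_submit(), gen8_emit_request() above has to budget for it up front, reserving 6 + WA_TAIL_DWORDS dwords, writing its 6-dword payload and leaving the two trailing NOOPs to the common submit path. A small array-based sketch of that dword accounting, purely illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WA_TAIL_DWORDS 2   /* padding so HEAD never equals TAIL on lite restore */
#define MI_NOOP        0   /* stand-in opcode value for the sketch */

static uint32_t ring[64];
static unsigned int tail;

static void emit(uint32_t dw) { ring[tail++] = dw; }

int main(void)
{
        unsigned int reserved = 6 + WA_TAIL_DWORDS;   /* what _begin() reserves */
        unsigned int start = tail;
        uint32_t dw;

        /* 6-dword request payload (opcodes faked as 1..6 here). */
        for (dw = 1; dw <= 6; dw++)
                emit(dw);

        /* Common submit path appends the workaround padding. */
        emit(MI_NOOP);
        emit(MI_NOOP);

        assert(tail - start == reserved);   /* budget exactly consumed */
        printf("emitted %u dwords (payload 6 + padding %d)\n",
               tail - start, WA_TAIL_DWORDS);
        return 0;
}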
- */ - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance(ringbuf); +static int gen8_emit_request_render(struct drm_i915_gem_request *request) +{ + struct intel_ringbuffer *ringbuf = request->ringbuf; + int ret; - return 0; + ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); + if (ret) + return ret; + + /* w/a for post sync ops following a GPGPU operation we + * need a prior CS_STALL, which is emitted by the flush + * following the batch. + */ + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5)); + intel_logical_ring_emit(ringbuf, + (PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE)); + intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring)); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); + intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); + return intel_logical_ring_advance_and_submit(request); } static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req) @@ -1911,12 +2016,44 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring) ring->status_page.obj = NULL; } + ring->disable_lite_restore_wa = false; + ring->ctx_desc_template = 0; + lrc_destroy_wa_ctx_obj(ring); ring->dev = NULL; } -static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) +static void +logical_ring_default_vfuncs(struct drm_device *dev, + struct intel_engine_cs *ring) +{ + /* Default vfuncs which can be overriden by each engine. */ + ring->init_hw = gen8_init_common_ring; + ring->emit_request = gen8_emit_request; + ring->emit_flush = gen8_emit_flush; + ring->irq_get = gen8_logical_ring_get_irq; + ring->irq_put = gen8_logical_ring_put_irq; + ring->emit_bb_start = gen8_emit_bb_start; + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + ring->get_seqno = bxt_a_get_seqno; + ring->set_seqno = bxt_a_set_seqno; + } else { + ring->get_seqno = gen8_get_seqno; + ring->set_seqno = gen8_set_seqno; + } +} + +static inline void +logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift) { + ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; + ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; +} + +static int +logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) +{ + struct intel_context *dctx = to_i915(dev)->kernel_context; int ret; /* Intentionally left blank. 
*/ @@ -1933,19 +2070,18 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin INIT_LIST_HEAD(&ring->execlist_retired_req_list); spin_lock_init(&ring->execlist_lock); + logical_ring_init_platform_invariants(ring); + ret = i915_cmd_parser_init_ring(ring); if (ret) goto error; - ret = intel_lr_context_deferred_alloc(ring->default_context, ring); + ret = intel_lr_context_deferred_alloc(dctx, ring); if (ret) goto error; /* As this is the default context, always pin it */ - ret = intel_lr_context_do_pin( - ring, - ring->default_context->engine[ring->id].state, - ring->default_context->engine[ring->id].ringbuf); + ret = intel_lr_context_do_pin(dctx, ring); if (ret) { DRM_ERROR( "Failed to pin and map ringbuffer %s: %d\n", @@ -1968,32 +2104,25 @@ static int logical_render_ring_init(struct drm_device *dev) ring->name = "render ring"; ring->id = RCS; + ring->exec_id = I915_EXEC_RENDER; + ring->guc_id = GUC_RENDER_ENGINE; ring->mmio_base = RENDER_RING_BASE; - ring->irq_enable_mask = - GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT; - ring->irq_keep_mask = - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT; + + logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT); if (HAS_L3_DPF(dev)) ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + logical_ring_default_vfuncs(dev, ring); + + /* Override some for render ring. */ if (INTEL_INFO(dev)->gen >= 9) ring->init_hw = gen9_init_render_ring; else ring->init_hw = gen8_init_render_ring; ring->init_context = gen8_init_rcs_context; ring->cleanup = intel_fini_pipe_control; - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { - ring->get_seqno = bxt_a_get_seqno; - ring->set_seqno = bxt_a_set_seqno; - } else { - ring->get_seqno = gen8_get_seqno; - ring->set_seqno = gen8_set_seqno; - } - ring->emit_request = gen8_emit_request; ring->emit_flush = gen8_emit_flush_render; - ring->irq_get = gen8_logical_ring_get_irq; - ring->irq_put = gen8_logical_ring_put_irq; - ring->emit_bb_start = gen8_emit_bb_start; + ring->emit_request = gen8_emit_request_render; ring->dev = dev; @@ -2027,25 +2156,12 @@ static int logical_bsd_ring_init(struct drm_device *dev) ring->name = "bsd ring"; ring->id = VCS; + ring->exec_id = I915_EXEC_BSD; + ring->guc_id = GUC_VIDEO_ENGINE; ring->mmio_base = GEN6_BSD_RING_BASE; - ring->irq_enable_mask = - GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; - ring->irq_keep_mask = - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; - ring->init_hw = gen8_init_common_ring; - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { - ring->get_seqno = bxt_a_get_seqno; - ring->set_seqno = bxt_a_set_seqno; - } else { - ring->get_seqno = gen8_get_seqno; - ring->set_seqno = gen8_set_seqno; - } - ring->emit_request = gen8_emit_request; - ring->emit_flush = gen8_emit_flush; - ring->irq_get = gen8_logical_ring_get_irq; - ring->irq_put = gen8_logical_ring_put_irq; - ring->emit_bb_start = gen8_emit_bb_start; + logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT); + logical_ring_default_vfuncs(dev, ring); return logical_ring_init(dev, ring); } @@ -2055,22 +2171,14 @@ static int logical_bsd2_ring_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *ring = &dev_priv->ring[VCS2]; - ring->name = "bds2 ring"; + ring->name = "bsd2 ring"; ring->id = VCS2; + ring->exec_id = I915_EXEC_BSD; + ring->guc_id = GUC_VIDEO_ENGINE2; ring->mmio_base = GEN8_BSD2_RING_BASE; - ring->irq_enable_mask = - GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; - ring->irq_keep_mask = - GT_CONTEXT_SWITCH_INTERRUPT << 
GEN8_VCS2_IRQ_SHIFT; - ring->init_hw = gen8_init_common_ring; - ring->get_seqno = gen8_get_seqno; - ring->set_seqno = gen8_set_seqno; - ring->emit_request = gen8_emit_request; - ring->emit_flush = gen8_emit_flush; - ring->irq_get = gen8_logical_ring_get_irq; - ring->irq_put = gen8_logical_ring_put_irq; - ring->emit_bb_start = gen8_emit_bb_start; + logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT); + logical_ring_default_vfuncs(dev, ring); return logical_ring_init(dev, ring); } @@ -2082,25 +2190,12 @@ static int logical_blt_ring_init(struct drm_device *dev) ring->name = "blitter ring"; ring->id = BCS; + ring->exec_id = I915_EXEC_BLT; + ring->guc_id = GUC_BLITTER_ENGINE; ring->mmio_base = BLT_RING_BASE; - ring->irq_enable_mask = - GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; - ring->irq_keep_mask = - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT; - ring->init_hw = gen8_init_common_ring; - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { - ring->get_seqno = bxt_a_get_seqno; - ring->set_seqno = bxt_a_set_seqno; - } else { - ring->get_seqno = gen8_get_seqno; - ring->set_seqno = gen8_set_seqno; - } - ring->emit_request = gen8_emit_request; - ring->emit_flush = gen8_emit_flush; - ring->irq_get = gen8_logical_ring_get_irq; - ring->irq_put = gen8_logical_ring_put_irq; - ring->emit_bb_start = gen8_emit_bb_start; + logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT); + logical_ring_default_vfuncs(dev, ring); return logical_ring_init(dev, ring); } @@ -2112,25 +2207,12 @@ static int logical_vebox_ring_init(struct drm_device *dev) ring->name = "video enhancement ring"; ring->id = VECS; + ring->exec_id = I915_EXEC_VEBOX; + ring->guc_id = GUC_VIDEOENHANCE_ENGINE; ring->mmio_base = VEBOX_RING_BASE; - ring->irq_enable_mask = - GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; - ring->irq_keep_mask = - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT; - ring->init_hw = gen8_init_common_ring; - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { - ring->get_seqno = bxt_a_get_seqno; - ring->set_seqno = bxt_a_set_seqno; - } else { - ring->get_seqno = gen8_get_seqno; - ring->set_seqno = gen8_set_seqno; - } - ring->emit_request = gen8_emit_request; - ring->emit_flush = gen8_emit_flush; - ring->irq_get = gen8_logical_ring_get_irq; - ring->irq_put = gen8_logical_ring_put_irq; - ring->emit_bb_start = gen8_emit_bb_start; + logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT); + logical_ring_default_vfuncs(dev, ring); return logical_ring_init(dev, ring); } @@ -2368,26 +2450,39 @@ void intel_lr_context_free(struct intel_context *ctx) { int i; - for (i = 0; i < I915_NUM_RINGS; i++) { + for (i = I915_NUM_RINGS; --i >= 0; ) { + struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf; struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state; - if (ctx_obj) { - struct intel_ringbuffer *ringbuf = - ctx->engine[i].ringbuf; - struct intel_engine_cs *ring = ringbuf->ring; + if (!ctx_obj) + continue; - if (ctx == ring->default_context) { - intel_unpin_ringbuffer_obj(ringbuf); - i915_gem_object_ggtt_unpin(ctx_obj); - } - WARN_ON(ctx->engine[ring->id].pin_count); - intel_ringbuffer_free(ringbuf); - drm_gem_object_unreference(&ctx_obj->base); + if (ctx == ctx->i915->kernel_context) { + intel_unpin_ringbuffer_obj(ringbuf); + i915_gem_object_ggtt_unpin(ctx_obj); } + + WARN_ON(ctx->engine[i].pin_count); + intel_ringbuffer_free(ringbuf); + drm_gem_object_unreference(&ctx_obj->base); } } -static uint32_t get_lr_context_size(struct intel_engine_cs *ring) +/** + * intel_lr_context_size() - return the size of the context for an engine + * 
@ring: which engine to find the context size for + * + * Each engine may require a different amount of space for a context image, + * so when allocating (or copying) an image, this function can be used to + * find the right size for the specific engine. + * + * Return: size (in bytes) of an engine-specific context image + * + * Note: this size includes the HWSP, which is part of the context image + * in LRC mode, but does not include the "shared data page" used with + * GuC submission. The caller should account for this if using the GuC. + */ +uint32_t intel_lr_context_size(struct intel_engine_cs *ring) { int ret = 0; @@ -2444,7 +2539,7 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, */ int intel_lr_context_deferred_alloc(struct intel_context *ctx, - struct intel_engine_cs *ring) + struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; struct drm_i915_gem_object *ctx_obj; @@ -2455,7 +2550,7 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx, WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); WARN_ON(ctx->engine[ring->id].state); - context_size = round_up(get_lr_context_size(ring), 4096); + context_size = round_up(intel_lr_context_size(ring), 4096); /* One extra page as the sharing data between driver and GuC */ context_size += PAGE_SIZE * LRC_PPHWSP_PN; @@ -2481,14 +2576,13 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx, ctx->engine[ring->id].ringbuf = ringbuf; ctx->engine[ring->id].state = ctx_obj; - if (ctx != ring->default_context && ring->init_context) { + if (ctx != ctx->i915->kernel_context && ring->init_context) { struct drm_i915_gem_request *req; - ret = i915_gem_request_alloc(ring, - ctx, &req); - if (ret) { - DRM_ERROR("ring create req: %d\n", - ret); + req = i915_gem_request_alloc(ring, ctx); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + DRM_ERROR("ring create req: %d\n", ret); goto error_ringbuf; } diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 0b821b91723a..e6cda3e225d0 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -25,8 +25,6 @@ #define _INTEL_LRC_H_ #define GEN8_LR_CONTEXT_ALIGN 4096 -#define GEN8_CSB_ENTRIES 6 -#define GEN8_CSB_PTR_MASK 0x07 /* Execlists regs */ #define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230) @@ -40,6 +38,22 @@ #define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4) #define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0) +/* The docs specify that the write pointer wraps around after 5h, "After status + * is written out to the last available status QW at offset 5h, this pointer + * wraps to 0." + * + * Therefore, one must infer than even though there are 3 bits available, 6 and + * 7 appear to be * reserved. 
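Illustrative aside, not part of the patch: the comment above fixes the CSB geometry at six entries, with a 3-bit write pointer in bits 0-2 of RING_CONTEXT_STATUS_PTR and a 3-bit read pointer in bits 8-10, values 6 and 7 never being reported. A standalone sketch that decodes an invented register value and works out how many entries are pending, including the wrap handling the interrupt handler uses (the driver actually tracks its own read pointer in software):

#include <stdint.h>
#include <stdio.h>

#define CSB_ENTRIES  6
#define CSB_PTR_MASK 0x7

int main(void)
{
        uint32_t status_pointer = 0x0501;   /* invented register contents */
        unsigned int read_ptr  = (status_pointer >> 8) & CSB_PTR_MASK;
        unsigned int write_ptr =  status_pointer       & CSB_PTR_MASK;

        if (read_ptr > write_ptr)           /* write pointer wrapped past 5 */
                write_ptr += CSB_ENTRIES;

        printf("read=%u write=%u pending=%u entries\n",
               read_ptr, write_ptr % CSB_ENTRIES, write_ptr - read_ptr);
        return 0;
}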
+ */ +#define GEN8_CSB_ENTRIES 6 +#define GEN8_CSB_PTR_MASK 0x7 +#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8) +#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0) +#define GEN8_CSB_WRITE_PTR(csb_status) \ + (((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0) +#define GEN8_CSB_READ_PTR(csb_status) \ + (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8) + /* Logical Rings */ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); @@ -84,21 +98,25 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf, #define LRC_STATE_PN (LRC_PPHWSP_PN + 1) void intel_lr_context_free(struct intel_context *ctx); +uint32_t intel_lr_context_size(struct intel_engine_cs *ring); int intel_lr_context_deferred_alloc(struct intel_context *ctx, struct intel_engine_cs *ring); -void intel_lr_context_unpin(struct drm_i915_gem_request *req); +void intel_lr_context_unpin(struct intel_context *ctx, + struct intel_engine_cs *engine); void intel_lr_context_reset(struct drm_device *dev, struct intel_context *ctx); uint64_t intel_lr_context_descriptor(struct intel_context *ctx, struct intel_engine_cs *ring); +u32 intel_execlists_ctx_id(struct intel_context *ctx, + struct intel_engine_cs *ring); + /* Execlists */ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); struct i915_execbuffer_params; int intel_execlists_submission(struct i915_execbuffer_params *params, struct drm_i915_gem_execbuffer2 *args, struct list_head *vmas); -u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj); void intel_lrc_irq_handler(struct intel_engine_cs *ring); void intel_execlists_retire_requests(struct intel_engine_cs *ring); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 0da0240caf81..811ddf7799f0 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -31,6 +31,7 @@ #include <linux/dmi.h> #include <linux/i2c.h> #include <linux/slab.h> +#include <linux/vga_switcheroo.h> #include <drm/drmP.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> @@ -1080,7 +1081,12 @@ void intel_lvds_init(struct drm_device *dev) * preferred mode is the right one. 
*/ mutex_lock(&dev->mode_config.mutex); - edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin)); + if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) + edid = drm_get_edid_switcheroo(connector, + intel_gmbus_get_adapter(dev_priv, pin)); + else + edid = drm_get_edid(connector, + intel_gmbus_get_adapter(dev_priv, pin)); if (edid) { if (drm_add_edid_modes(connector, edid)) { drm_mode_connector_update_edid_property(connector, diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 76f1980a7541..9168413fe204 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -240,9 +240,9 @@ static int intel_overlay_on(struct intel_overlay *overlay) WARN_ON(overlay->active); WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); - ret = i915_gem_request_alloc(ring, ring->default_context, &req); - if (ret) - return ret; + req = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(req)) + return PTR_ERR(req); ret = intel_ring_begin(req, 4); if (ret) { @@ -283,9 +283,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay, if (tmp & (1 << 17)) DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); - ret = i915_gem_request_alloc(ring, ring->default_context, &req); - if (ret) - return ret; + req = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(req)) + return PTR_ERR(req); ret = intel_ring_begin(req, 2); if (ret) { @@ -349,9 +349,9 @@ static int intel_overlay_off(struct intel_overlay *overlay) * of the hw. Do it in both cases */ flip_addr |= OFC_UPDATE; - ret = i915_gem_request_alloc(ring, ring->default_context, &req); - if (ret) - return ret; + req = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(req)) + return PTR_ERR(req); ret = intel_ring_begin(req, 6); if (ret) { @@ -423,9 +423,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) /* synchronous slowpath */ struct drm_i915_gem_request *req; - ret = i915_gem_request_alloc(ring, ring->default_context, &req); - if (ret) - return ret; + req = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(req)) + return PTR_ERR(req); ret = intel_ring_begin(req, 2); if (ret) { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index eb5fa05cf476..379eabe093cb 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -32,6 +32,8 @@ #include <linux/module.h> /** + * DOC: RC6 + * * RC6 is a special power stage which allows the GPU to enter an very * low-voltage mode when idle, using down to 0V while at this stage. This * stage is entered automatically when the GPU is idle when RC6 support is @@ -546,7 +548,7 @@ static const struct intel_watermark_params i845_wm_info = { * intel_calculate_wm - calculate watermark level * @clock_in_khz: pixel clock * @wm: chip FIFO params - * @pixel_size: display pixel size + * @cpp: bytes per pixel * @latency_ns: memory latency for the platform * * Calculate the watermark level (the level at which the display plane will @@ -562,8 +564,7 @@ static const struct intel_watermark_params i845_wm_info = { */ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, const struct intel_watermark_params *wm, - int fifo_size, - int pixel_size, + int fifo_size, int cpp, unsigned long latency_ns) { long entries_required, wm_size; @@ -574,7 +575,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, * clocks go from a few thousand to several hundred thousand. 
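Illustrative aside, not part of the patch: the cpp conversions in the intel_pm.c hunks around here all feed the formula documented for intel_calculate_wm(), where the FIFO has to absorb everything the display fetches during the memory latency window, i.e. pixel rate times bytes per pixel times latency, rounded up to FIFO cachelines. A worked standalone example with invented numbers; the latency, cacheline and FIFO sizes are placeholders, not a real platform's parameters:

#include <stdio.h>

int main(void)
{
        unsigned long clock_in_khz = 148500;   /* invented pixel clock */
        unsigned long cpp = 4;                 /* bytes per pixel (e.g. XRGB8888) */
        unsigned long latency_ns = 5000;       /* invented memory latency */
        unsigned long cacheline = 64, fifo_size = 512;

        /* Bytes fetched while memory is busy: (kHz / 1000) * cpp * ns / 1000. */
        unsigned long bytes = ((clock_in_khz / 1000) * cpp * latency_ns) / 1000;
        unsigned long entries = (bytes + cacheline - 1) / cacheline;

        printf("%lu bytes during latency -> %lu FIFO entries (of %lu)\n",
               bytes, entries, fifo_size);
        printf("watermark is roughly fifo_size - entries = %lu\n",
               fifo_size - entries);
        return 0;
}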
* latency is usually a few thousand */ - entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / + entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) / 1000; entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); @@ -638,13 +639,13 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) crtc = single_enabled_crtc(dev); if (crtc) { const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; - int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; + int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0); int clock = adjusted_mode->crtc_clock; /* Display SR */ wm = intel_calculate_wm(clock, &pineview_display_wm, pineview_display_wm.fifo_size, - pixel_size, latency->display_sr); + cpp, latency->display_sr); reg = I915_READ(DSPFW1); reg &= ~DSPFW_SR_MASK; reg |= FW_WM(wm, SR); @@ -654,7 +655,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) /* cursor SR */ wm = intel_calculate_wm(clock, &pineview_cursor_wm, pineview_display_wm.fifo_size, - pixel_size, latency->cursor_sr); + cpp, latency->cursor_sr); reg = I915_READ(DSPFW3); reg &= ~DSPFW_CURSOR_SR_MASK; reg |= FW_WM(wm, CURSOR_SR); @@ -663,7 +664,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) /* Display HPLL off SR */ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, pineview_display_hplloff_wm.fifo_size, - pixel_size, latency->display_hpll_disable); + cpp, latency->display_hpll_disable); reg = I915_READ(DSPFW3); reg &= ~DSPFW_HPLL_SR_MASK; reg |= FW_WM(wm, HPLL_SR); @@ -672,7 +673,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) /* cursor HPLL off SR */ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pineview_display_hplloff_wm.fifo_size, - pixel_size, latency->cursor_hpll_disable); + cpp, latency->cursor_hpll_disable); reg = I915_READ(DSPFW3); reg &= ~DSPFW_HPLL_CURSOR_MASK; reg |= FW_WM(wm, HPLL_CURSOR); @@ -696,7 +697,7 @@ static bool g4x_compute_wm0(struct drm_device *dev, { struct drm_crtc *crtc; const struct drm_display_mode *adjusted_mode; - int htotal, hdisplay, clock, pixel_size; + int htotal, hdisplay, clock, cpp; int line_time_us, line_count; int entries, tlb_miss; @@ -711,10 +712,10 @@ static bool g4x_compute_wm0(struct drm_device *dev, clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; - pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; + cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0); /* Use the small buffer method to calculate plane watermark */ - entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; + entries = ((clock * cpp / 1000) * display_latency_ns) / 1000; tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; if (tlb_miss > 0) entries += tlb_miss; @@ -726,7 +727,7 @@ static bool g4x_compute_wm0(struct drm_device *dev, /* Use the large buffer method to calculate cursor watermark */ line_time_us = max(htotal * 1000 / clock, 1); line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; - entries = line_count * crtc->cursor->state->crtc_w * pixel_size; + entries = line_count * crtc->cursor->state->crtc_w * cpp; tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; if (tlb_miss > 0) entries += tlb_miss; @@ -782,7 +783,7 @@ static bool g4x_compute_srwm(struct drm_device *dev, { struct drm_crtc *crtc; const struct drm_display_mode *adjusted_mode; - int hdisplay, htotal, pixel_size, clock; + 
int hdisplay, htotal, cpp, clock; unsigned long line_time_us; int line_count, line_size; int small, large; @@ -798,21 +799,21 @@ static bool g4x_compute_srwm(struct drm_device *dev, clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; - pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; + cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0); line_time_us = max(htotal * 1000 / clock, 1); line_count = (latency_ns / line_time_us + 1000) / 1000; - line_size = hdisplay * pixel_size; + line_size = hdisplay * cpp; /* Use the minimum of the small and large buffer method for primary */ - small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + small = ((clock * cpp / 1000) * latency_ns) / 1000; large = line_count * line_size; entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); *display_wm = entries + display->guard_size; /* calculate the self-refresh watermark for display cursor */ - entries = line_count * pixel_size * crtc->cursor->state->crtc_w; + entries = line_count * cpp * crtc->cursor->state->crtc_w; entries = DIV_ROUND_UP(entries, cursor->cacheline_size); *cursor_wm = entries + cursor->guard_size; @@ -904,13 +905,13 @@ enum vlv_wm_level { static unsigned int vlv_wm_method2(unsigned int pixel_rate, unsigned int pipe_htotal, unsigned int horiz_pixels, - unsigned int bytes_per_pixel, + unsigned int cpp, unsigned int latency) { unsigned int ret; ret = (latency * pixel_rate) / (pipe_htotal * 10000); - ret = (ret + 1) * horiz_pixels * bytes_per_pixel; + ret = (ret + 1) * horiz_pixels * cpp; ret = DIV_ROUND_UP(ret, 64); return ret; @@ -939,7 +940,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane, int level) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - int clock, htotal, pixel_size, width, wm; + int clock, htotal, cpp, width, wm; if (dev_priv->wm.pri_latency[level] == 0) return USHRT_MAX; @@ -947,7 +948,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane, if (!state->visible) return 0; - pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0); + cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0); clock = crtc->config->base.adjusted_mode.crtc_clock; htotal = crtc->config->base.adjusted_mode.crtc_htotal; width = crtc->config->pipe_src_w; @@ -963,7 +964,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane, */ wm = 63; } else { - wm = vlv_wm_method2(clock, htotal, width, pixel_size, + wm = vlv_wm_method2(clock, htotal, width, cpp, dev_priv->wm.pri_latency[level] * 10); } @@ -1437,7 +1438,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) int clock = adjusted_mode->crtc_clock; int htotal = adjusted_mode->crtc_htotal; int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; - int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; + int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0); unsigned long line_time_us; int entries; @@ -1445,7 +1446,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) /* Use ns/us then divide to preserve precision */ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * hdisplay; + cpp * hdisplay; entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); srwm = I965_FIFO_SIZE - entries; if (srwm < 0) @@ -1455,7 +1456,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) entries, srwm); entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * crtc->cursor->state->crtc_w; + cpp * 
crtc->cursor->state->crtc_w; entries = DIV_ROUND_UP(entries, i965_cursor_wm_info.cacheline_size); cursor_sr = i965_cursor_wm_info.fifo_size - @@ -1516,7 +1517,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) crtc = intel_get_crtc_for_plane(dev, 0); if (intel_crtc_active(crtc)) { const struct drm_display_mode *adjusted_mode; - int cpp = crtc->primary->state->fb->bits_per_pixel / 8; + int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0); if (IS_GEN2(dev)) cpp = 4; @@ -1538,7 +1539,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) crtc = intel_get_crtc_for_plane(dev, 1); if (intel_crtc_active(crtc)) { const struct drm_display_mode *adjusted_mode; - int cpp = crtc->primary->state->fb->bits_per_pixel / 8; + int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0); if (IS_GEN2(dev)) cpp = 4; @@ -1584,7 +1585,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) int clock = adjusted_mode->crtc_clock; int htotal = adjusted_mode->crtc_htotal; int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w; - int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8; + int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0); unsigned long line_time_us; int entries; @@ -1592,7 +1593,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) /* Use ns/us then divide to preserve precision */ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * hdisplay; + cpp * hdisplay; entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); srwm = wm_info->fifo_size - entries; @@ -1672,6 +1673,9 @@ uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) if (pipe_h < pfit_h) pipe_h = pfit_h; + if (WARN_ON(!pfit_w || !pfit_h)) + return pixel_rate; + pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h, pfit_w * pfit_h); } @@ -1680,15 +1684,14 @@ uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) } /* latency must be in 0.1us units. */ -static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, - uint32_t latency) +static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency) { uint64_t ret; if (WARN(latency == 0, "Latency value missing\n")) return UINT_MAX; - ret = (uint64_t) pixel_rate * bytes_per_pixel * latency; + ret = (uint64_t) pixel_rate * cpp * latency; ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2; return ret; @@ -1696,24 +1699,37 @@ static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, /* latency must be in 0.1us units. */ static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, - uint32_t horiz_pixels, uint8_t bytes_per_pixel, + uint32_t horiz_pixels, uint8_t cpp, uint32_t latency) { uint32_t ret; if (WARN(latency == 0, "Latency value missing\n")) return UINT_MAX; + if (WARN_ON(!pipe_htotal)) + return UINT_MAX; ret = (latency * pixel_rate) / (pipe_htotal * 10000); - ret = (ret + 1) * horiz_pixels * bytes_per_pixel; + ret = (ret + 1) * horiz_pixels * cpp; ret = DIV_ROUND_UP(ret, 64) + 2; return ret; } static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, - uint8_t bytes_per_pixel) + uint8_t cpp) { - return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; + /* + * Neither of these should be possible since this function shouldn't be + * called if the CRTC is off or the plane is invisible. 
But let's be + * extra paranoid to avoid a potential divide-by-zero if we screw up + * elsewhere in the driver. + */ + if (WARN_ON(!cpp)) + return 0; + if (WARN_ON(!horiz_pixels)) + return 0; + + return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2; } struct ilk_wm_maximums { @@ -1732,13 +1748,14 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, uint32_t mem_value, bool is_lp) { - int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; + int cpp = pstate->base.fb ? + drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; uint32_t method1, method2; if (!cstate->base.active || !pstate->visible) return 0; - method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value); + method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value); if (!is_lp) return method1; @@ -1746,8 +1763,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate), cstate->base.adjusted_mode.crtc_htotal, drm_rect_width(&pstate->dst), - bpp, - mem_value); + cpp, mem_value); return min(method1, method2); } @@ -1760,18 +1776,18 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate, uint32_t mem_value) { - int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; + int cpp = pstate->base.fb ? + drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; uint32_t method1, method2; if (!cstate->base.active || !pstate->visible) return 0; - method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value); + method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value); method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate), cstate->base.adjusted_mode.crtc_htotal, drm_rect_width(&pstate->dst), - bpp, - mem_value); + cpp, mem_value); return min(method1, method2); } @@ -1783,16 +1799,20 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate, uint32_t mem_value) { - int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; + /* + * We treat the cursor plane as always-on for the purposes of watermark + * calculation. Until we have two-stage watermark programming merged, + * this is necessary to avoid flickering. + */ + int cpp = 4; + int width = pstate->visible ? pstate->base.crtc_w : 64; - if (!cstate->base.active || !pstate->visible) + if (!cstate->base.active) return 0; return ilk_wm_method2(ilk_pipe_pixel_rate(cstate), cstate->base.adjusted_mode.crtc_htotal, - drm_rect_width(&pstate->dst), - bpp, - mem_value); + width, cpp, mem_value); } /* Only for WM_LP. */ @@ -1800,12 +1820,13 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate, uint32_t pri_val) { - int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; + int cpp = pstate->base.fb ? 
+ drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; if (!cstate->base.active || !pstate->visible) return 0; - return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp); + return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp); } static unsigned int ilk_display_fifo_size(const struct drm_device *dev) @@ -1998,14 +2019,19 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, } static uint32_t -hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) +hsw_compute_linetime_wm(struct drm_device *dev, + struct intel_crtc_state *cstate) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = + &cstate->base.adjusted_mode; u32 linetime, ips_linetime; - if (!intel_crtc->active) + if (!cstate->base.active) + return 0; + if (WARN_ON(adjusted_mode->crtc_clock == 0)) + return 0; + if (WARN_ON(dev_priv->cdclk_freq == 0)) return 0; /* The WM are computed with base on how long it takes to fill a single @@ -2277,6 +2303,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc, return PTR_ERR(cstate); pipe_wm = &cstate->wm.optimal.ilk; + memset(pipe_wm, 0, sizeof(*pipe_wm)); for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { ps = drm_atomic_get_plane_state(state, @@ -2313,8 +2340,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc, pristate, sprstate, curstate, &pipe_wm->wm[0]); if (IS_HASWELL(dev) || IS_BROADWELL(dev)) - pipe_wm->linetime = hsw_compute_linetime_wm(dev, - &intel_crtc->base); + pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate); /* LP0 watermarks always use 1/2 DDB partitioning */ ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); @@ -3019,26 +3045,25 @@ static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config) /* * The max latency should be 257 (max the punit can code is 255 and we add 2us - * for the read latency) and bytes_per_pixel should always be <= 8, so that + * for the read latency) and cpp should always be <= 8, so that * should allow pixel_rate up to ~2 GHz which seems sufficient since max * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. 
*/ -static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, - uint32_t latency) +static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency) { uint32_t wm_intermediate_val, ret; if (latency == 0) return UINT_MAX; - wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512; + wm_intermediate_val = latency * pixel_rate * cpp / 512; ret = DIV_ROUND_UP(wm_intermediate_val, 1000); return ret; } static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, - uint32_t horiz_pixels, uint8_t bytes_per_pixel, + uint32_t horiz_pixels, uint8_t cpp, uint64_t tiling, uint32_t latency) { uint32_t ret; @@ -3048,7 +3073,7 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, if (latency == 0) return UINT_MAX; - plane_bytes_per_line = horiz_pixels * bytes_per_pixel; + plane_bytes_per_line = horiz_pixels * cpp; if (tiling == I915_FORMAT_MOD_Y_TILED || tiling == I915_FORMAT_MOD_Yf_TILED) { @@ -3098,23 +3123,21 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, uint32_t plane_bytes_per_line, plane_blocks_per_line; uint32_t res_blocks, res_lines; uint32_t selected_result; - uint8_t bytes_per_pixel; + uint8_t cpp; if (latency == 0 || !cstate->base.active || !fb) return false; - bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0); + cpp = drm_format_plane_cpp(fb->pixel_format, 0); method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), - bytes_per_pixel, - latency); + cpp, latency); method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), cstate->base.adjusted_mode.crtc_htotal, cstate->pipe_src_w, - bytes_per_pixel, - fb->modifier[0], + cpp, fb->modifier[0], latency); - plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel; + plane_bytes_per_line = cstate->pipe_src_w * cpp; plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || @@ -3122,11 +3145,11 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, uint32_t min_scanlines = 4; uint32_t y_tile_minimum; if (intel_rotation_90_or_270(plane->state->rotation)) { - int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ? + int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ? 
drm_format_plane_cpp(fb->pixel_format, 1) : drm_format_plane_cpp(fb->pixel_format, 0); - switch (bpp) { + switch (cpp) { case 1: min_scanlines = 16; break; @@ -3597,23 +3620,45 @@ static void skl_update_wm(struct drm_crtc *crtc) dev_priv->wm.skl_hw = *results; } -static void ilk_program_watermarks(struct drm_i915_private *dev_priv) +static void ilk_compute_wm_config(struct drm_device *dev, + struct intel_wm_config *config) { - struct drm_device *dev = dev_priv->dev; + struct intel_crtc *crtc; + + /* Compute the currently _active_ config */ + for_each_intel_crtc(dev, crtc) { + const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; + + if (!wm->pipe_enabled) + continue; + + config->sprites_enabled |= wm->sprites_enabled; + config->sprites_scaled |= wm->sprites_scaled; + config->num_pipes_active++; + } +} + +static void ilk_program_watermarks(struct intel_crtc_state *cstate) +{ + struct drm_crtc *crtc = cstate->base.crtc; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; struct ilk_wm_maximums max; - struct intel_wm_config *config = &dev_priv->wm.config; + struct intel_wm_config config = {}; struct ilk_wm_values results = {}; enum intel_ddb_partitioning partitioning; - ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max); - ilk_wm_merge(dev, config, &max, &lp_wm_1_2); + ilk_compute_wm_config(dev, &config); + + ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); + ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); /* 5/6 split only in single pipe config on IVB+ */ if (INTEL_INFO(dev)->gen >= 7 && - config->num_pipes_active == 1 && config->sprites_enabled) { - ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max); - ilk_wm_merge(dev, config, &max, &lp_wm_5_6); + config.num_pipes_active == 1 && config.sprites_enabled) { + ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); + ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); } else { @@ -3630,7 +3675,6 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv) static void ilk_update_wm(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); @@ -3650,7 +3694,7 @@ static void ilk_update_wm(struct drm_crtc *crtc) intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk; - ilk_program_watermarks(dev_priv); + ilk_program_watermarks(cstate); } static void skl_pipe_wm_active_state(uint32_t val, @@ -4036,7 +4080,7 @@ void intel_update_watermarks(struct drm_crtc *crtc) dev_priv->display.update_wm(crtc); } -/** +/* * Lock protecting IPS related data structures */ DEFINE_SPINLOCK(mchdev_lock); @@ -4509,21 +4553,71 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode) } if (HAS_RC6p(dev)) DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", - (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", - (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", - (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); + onoff(mode & GEN6_RC_CTL_RC6_ENABLE), + onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), + onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE)); else DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n", - (mode & GEN6_RC_CTL_RC6_ENABLE) ? 
"on" : "off"); + onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); +} + +static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + bool enable_rc6 = true; + unsigned long rc6_ctx_base; + + if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) { + DRM_DEBUG_KMS("RC6 Base location not set properly.\n"); + enable_rc6 = false; + } + + /* + * The exact context size is not known for BXT, so assume a page size + * for this check. + */ + rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK; + if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) && + (rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base + + dev_priv->gtt.stolen_reserved_size))) { + DRM_DEBUG_KMS("RC6 Base address not as expected.\n"); + enable_rc6 = false; + } + + if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) && + ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) && + ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) && + ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) { + DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n"); + enable_rc6 = false; + } + + if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE | + GEN6_RC_CTL_HW_ENABLE)) && + ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) || + !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) { + DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n"); + enable_rc6 = false; + } + + return enable_rc6; } -static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) +int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) { /* No RC6 before Ironlake and code is gone for ilk. */ if (INTEL_INFO(dev)->gen < 6) return 0; + if (!enable_rc6) + return 0; + + if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) { + DRM_INFO("RC6 disabled by BIOS\n"); + return 0; + } + /* Respect the kernel parameter if it is set */ if (enable_rc6 >= 0) { int mask; @@ -4693,8 +4787,7 @@ static void gen9_enable_rc6(struct drm_device *dev) /* 3a: Enable RC6 */ if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; - DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? - "on" : "off"); + DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); /* WaRsUseTimeoutMode */ if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { @@ -4713,8 +4806,7 @@ static void gen9_enable_rc6(struct drm_device *dev) * 3b: Enable Coarse Power Gating only when RC6 is enabled. * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. */ - if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || - ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0))) + if (NEEDS_WaRsDisableCoarsePowerGating(dev)) I915_WRITE(GEN9_PG_ENABLE, 0); else I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? @@ -6015,7 +6107,6 @@ void intel_init_gt_powersave(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); /* * RPM depends on RC6 to save restore the GT HW context, so make RC6 a * requirement. @@ -6981,6 +7072,7 @@ void intel_init_pm(struct drm_device *dev) dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { dev_priv->display.update_wm = ilk_update_wm; dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm; + dev_priv->display.program_watermarks = ilk_program_watermarks; } else { DRM_DEBUG_KMS("Failed to read display plane latency. 
" "Disable CxSR\n"); @@ -7146,9 +7238,10 @@ static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) { int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); - div = vlv_gpu_freq_div(czclk_freq) / 2; + div = vlv_gpu_freq_div(czclk_freq); if (div < 0) return div; + div /= 2; return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2; } @@ -7157,9 +7250,10 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) { int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); - mul = vlv_gpu_freq_div(czclk_freq) / 2; + mul = vlv_gpu_freq_div(czclk_freq); if (mul < 0) return mul; + mul /= 2; /* CHV needs even values */ return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2; diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 9ccff3011523..4ab757947f15 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -225,7 +225,12 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); } - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE); + if (dev_priv->psr.link_standby) + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); + else + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE); } static void vlv_psr_enable_source(struct intel_dp *intel_dp) @@ -280,6 +285,9 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) if (IS_HASWELL(dev)) val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; + if (dev_priv->psr.link_standby) + val |= EDP_PSR_LINK_STANDBY; + I915_WRITE(EDP_PSR_CTL, val | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | @@ -304,8 +312,15 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) dev_priv->psr.source_ok = false; - if (IS_HASWELL(dev) && dig_port->port != PORT_A) { - DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); + /* + * HSW spec explicitly says PSR is tied to port A. + * BDW+ platforms with DDI implementation of PSR have different + * PSR registers per transcoder and we only implement transcoder EDP + * ones. Since by Display design transcoder EDP is tied to port A + * we can safely escape based on the port A. + */ + if (HAS_DDI(dev) && dig_port->port != PORT_A) { + DRM_DEBUG_KMS("PSR condition failed: Port not supported\n"); return false; } @@ -314,6 +329,12 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) return false; } + if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && + !dev_priv->psr.link_standby) { + DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n"); + return false; + } + if (IS_HASWELL(dev) && I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) & S3D_ENABLE) { @@ -327,12 +348,6 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) return false; } - if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && - ((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) { - DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n"); - return false; - } - dev_priv->psr.source_ok = true; return true; } @@ -763,6 +778,27 @@ void intel_psr_init(struct drm_device *dev) dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; + /* Set link_standby x link_off defaults */ + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + /* HSW and BDW require workarounds that we don't implement. 
*/ + dev_priv->psr.link_standby = false; + else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) + /* On VLV and CHV only standby mode is supported. */ + dev_priv->psr.link_standby = true; + else + /* For new platforms let's respect VBT back again */ + dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; + + /* Override link_standby x link_off defaults */ + if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) { + DRM_DEBUG_KMS("PSR: Forcing link standby\n"); + dev_priv->psr.link_standby = true; + } + if (i915.enable_psr == 3 && dev_priv->psr.link_standby) { + DRM_DEBUG_KMS("PSR: Forcing main link off\n"); + dev_priv->psr.link_standby = false; + } + INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work); mutex_init(&dev_priv->psr.lock); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 40c6aff57256..133321a5b3d0 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -789,6 +789,22 @@ static int wa_add(struct drm_i915_private *dev_priv, #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val) +static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct i915_workarounds *wa = &dev_priv->workarounds; + const uint32_t index = wa->hw_whitelist_count[ring->id]; + + if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS)) + return -EINVAL; + + WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index), + i915_mmio_reg_offset(reg)); + wa->hw_whitelist_count[ring->id]++; + + return 0; +} + static int gen8_init_workarounds(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; @@ -894,6 +910,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t tmp; + int ret; /* WaEnableLbsSlaRetryTimerDecrement:skl */ I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | @@ -964,6 +981,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) /* WaDisableSTUnitPowerOptimization:skl,bxt */ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); + /* WaOCLCoherentLineFlush:skl,bxt */ + I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) | + GEN8_LQSC_FLUSH_COHERENT_LINES)); + + /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */ + ret= wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1); + if (ret) + return ret; + + /* WaAllowUMDToModifyHDCChicken1:skl,bxt */ + ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1); + if (ret) + return ret; + return 0; } @@ -1019,6 +1050,16 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) if (ret) return ret; + /* + * Actual WA is to disable percontext preemption granularity control + * until D0 which is the default case so this is equivalent to + * !WaDisablePerCtxtPreemptionGranularityControl:skl + */ + if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) { + I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, + _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); + } + if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ I915_WRITE(FF_SLICE_CS_CHICKEN2, @@ -1071,6 +1112,11 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); + /* WaDisableLSQCROPERFforOCL:skl */ + ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4); + if (ret) + return ret; + return skl_tune_iz_hashing(ring); } @@ -1106,6 +1152,20 @@ static int bxt_init_workarounds(struct 
intel_engine_cs *ring) GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); } + /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */ + /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */ + /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ + /* WaDisableLSQCROPERFforOCL:bxt */ + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1); + if (ret) + return ret; + + ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4); + if (ret) + return ret; + } + return 0; } @@ -1117,6 +1177,7 @@ int init_workarounds_ring(struct intel_engine_cs *ring) WARN_ON(ring->id != RCS); dev_priv->workarounds.count = 0; + dev_priv->workarounds.hw_whitelist_count[RCS] = 0; if (IS_BROADWELL(dev)) return bdw_init_workarounds(ring); @@ -1867,15 +1928,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req, offset = cs_offset; } - ret = intel_ring_begin(req, 4); + ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); - intel_ring_emit(ring, offset + len - 8); - intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); return 0; @@ -1901,6 +1960,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req, return 0; } +static void cleanup_phys_status_page(struct intel_engine_cs *ring) +{ + struct drm_i915_private *dev_priv = to_i915(ring->dev); + + if (!dev_priv->status_page_dmah) + return; + + drm_pci_free(ring->dev, dev_priv->status_page_dmah); + ring->status_page.page_addr = NULL; +} + static void cleanup_status_page(struct intel_engine_cs *ring) { struct drm_i915_gem_object *obj; @@ -1917,9 +1987,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring) static int init_status_page(struct intel_engine_cs *ring) { - struct drm_i915_gem_object *obj; + struct drm_i915_gem_object *obj = ring->status_page.obj; - if ((obj = ring->status_page.obj) == NULL) { + if (obj == NULL) { unsigned flags; int ret; @@ -1990,6 +2060,7 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) else iounmap(ringbuf->virtual_start); ringbuf->virtual_start = NULL; + ringbuf->vma = NULL; i915_gem_object_ggtt_unpin(ringbuf->obj); } @@ -2048,6 +2119,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, return ret; } + /* Access through the GTT requires the device to be awake. 
*/ + assert_rpm_wakelock_held(dev_priv); + ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), ringbuf->size); if (ringbuf->virtual_start == NULL) { @@ -2056,6 +2130,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, } } + ringbuf->vma = i915_gem_obj_to_ggtt(obj); + return 0; } @@ -2164,7 +2240,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, if (ret) goto error; } else { - BUG_ON(ring->id != RCS); + WARN_ON(ring->id != RCS); ret = init_phys_status_page(ring); if (ret) goto error; @@ -2210,7 +2286,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) if (ring->cleanup) ring->cleanup(ring); - cleanup_status_page(ring); + if (I915_NEED_GFX_HWS(ring->dev)) { + cleanup_status_page(ring); + } else { + WARN_ON(ring->id != RCS); + cleanup_phys_status_page(ring); + } i915_cmd_parser_fini_ring(ring); i915_gem_batch_pool_fini(&ring->batch_pool); @@ -2666,6 +2747,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->name = "render ring"; ring->id = RCS; + ring->exec_id = I915_EXEC_RENDER; ring->mmio_base = RENDER_RING_BASE; if (INTEL_INFO(dev)->gen >= 8) { @@ -2814,6 +2896,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) ring->name = "bsd ring"; ring->id = VCS; + ring->exec_id = I915_EXEC_BSD; ring->write_tail = ring_write_tail; if (INTEL_INFO(dev)->gen >= 6) { @@ -2890,6 +2973,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) ring->name = "bsd2 ring"; ring->id = VCS2; + ring->exec_id = I915_EXEC_BSD; ring->write_tail = ring_write_tail; ring->mmio_base = GEN8_BSD2_RING_BASE; @@ -2920,6 +3004,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) ring->name = "blitter ring"; ring->id = BCS; + ring->exec_id = I915_EXEC_BLT; ring->mmio_base = BLT_RING_BASE; ring->write_tail = ring_write_tail; @@ -2977,6 +3062,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) ring->name = "video enhancement ring"; ring->id = VECS; + ring->exec_id = I915_EXEC_VEBOX; ring->mmio_base = VEBOX_RING_BASE; ring->write_tail = ring_write_tail; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 49574ffe54bc..566b0ae10ce0 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -93,11 +93,13 @@ struct intel_ring_hangcheck { int score; enum intel_ring_hangcheck_action action; int deadlock; + u32 instdone[I915_NUM_INSTDONE_REG]; }; struct intel_ringbuffer { struct drm_i915_gem_object *obj; void __iomem *virtual_start; + struct i915_vma *vma; struct intel_engine_cs *ring; struct list_head link; @@ -147,14 +149,16 @@ struct i915_ctx_workarounds { struct intel_engine_cs { const char *name; enum intel_ring_id { - RCS = 0x0, - VCS, + RCS = 0, BCS, - VECS, - VCS2 + VCS, + VCS2, /* Keep instances of the same type engine together. 
*/ + VECS } id; #define I915_NUM_RINGS 5 -#define LAST_USER_RING (VECS + 1) +#define _VCS(n) (VCS + (n)) + unsigned int exec_id; + unsigned int guc_id; u32 mmio_base; struct drm_device *dev; struct intel_ringbuffer *buffer; @@ -268,6 +272,8 @@ struct intel_engine_cs { struct list_head execlist_queue; struct list_head execlist_retired_req_list; u8 next_context_status_buffer; + bool disable_lite_restore_wa; + u32 ctx_desc_template; u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */ int (*emit_request)(struct drm_i915_gem_request *request); int (*emit_flush)(struct drm_i915_gem_request *request, @@ -305,7 +311,6 @@ struct intel_engine_cs { wait_queue_head_t irq_queue; - struct intel_context *default_context; struct intel_context *last_context; struct intel_ring_hangcheck hangcheck; @@ -406,7 +411,7 @@ intel_write_status_page(struct intel_engine_cs *ring, ring->status_page.page_addr[reg] = value; } -/** +/* * Reads a dword out of the status page, which is written to from the command * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or * MI_STORE_DATA_IMM. @@ -423,6 +428,7 @@ intel_write_status_page(struct intel_engine_cs *ring, * The area from dword 0x30 to 0x3ff is available for driver usage. */ #define I915_GEM_HWS_INDEX 0x30 +#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT) #define I915_GEM_HWS_SCRATCH_INDEX 0x40 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index ddbdbffe829a..bbca527184d0 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -532,7 +532,8 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, SKL_DISP_PW_2); - WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n"); + WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev), + "Platform doesn't support DC5.\n"); WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n"); WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n"); @@ -568,7 +569,8 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; - WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n"); + WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev), + "Platform doesn't support DC6.\n"); WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n"); WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, "Backlight is not disabled.\n"); @@ -595,7 +597,8 @@ static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv) { assert_can_disable_dc5(dev_priv); - if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1) + if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && + i915.enable_dc != 0 && i915.enable_dc != 1) assert_can_disable_dc6(dev_priv); gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -623,7 +626,6 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv) static void skl_set_power_well(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, bool enable) { - struct drm_device *dev = dev_priv->dev; uint32_t tmp, fuse_status; uint32_t req_mask, state_mask; bool is_enabled, enable_requested, check_fuse_status = false; @@ -667,17 +669,6 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, !I915_READ(HSW_PWR_WELL_BIOS), "Invalid for power well status to be enabled, unless done by the BIOS, \ when request is 
to disable!\n"); - if (power_well->data == SKL_DISP_PW_2) { - /* - * DDI buffer programming unnecessary during - * driver-load/resume as it's already done - * during modeset initialization then. It's - * also invalid here as encoder list is still - * uninitialized. - */ - if (!dev_priv->power_domains.initializing) - intel_prepare_ddi(dev); - } I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask); } @@ -783,7 +774,8 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1) + if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && + i915.enable_dc != 0 && i915.enable_dc != 1) skl_enable_dc6(dev_priv); else gen9_enable_dc5(dev_priv); @@ -795,7 +787,8 @@ static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv, if (power_well->count > 0) { gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); } else { - if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && + if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && + i915.enable_dc != 0 && i915.enable_dc != 1) gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); else @@ -1851,7 +1844,7 @@ void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv) { struct i915_power_well *well; - if (!IS_SKYLAKE(dev_priv)) + if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) return; well = lookup_power_well(dev_priv, SKL_DISP_PW_1); @@ -1865,7 +1858,7 @@ void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv) { struct i915_power_well *well; - if (!IS_SKYLAKE(dev_priv)) + if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) return; well = lookup_power_well(dev_priv, SKL_DISP_PW_1); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 2e1da060b0e1..4ecc076c4041 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1527,6 +1527,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; @@ -1537,6 +1538,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector, if (intel_sdvo->pixel_clock_max < mode->clock) return MODE_CLOCK_HIGH; + if (mode->clock > max_dotclk) + return MODE_CLOCK_HIGH; + if (intel_sdvo->is_lvds) { if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay) return MODE_PANEL; diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index 2e2d4eb4a00d..db0ed499268a 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -24,8 +24,8 @@ * Eric Anholt <eric@anholt.net> */ -/** - * @file SDVO command definitions and structures. +/* + * SDVO command definitions and structures. 
*/ #define SDVO_OUTPUT_FIRST (0) @@ -66,39 +66,39 @@ struct intel_sdvo_caps { #define DTD_FLAG_VSYNC_POSITIVE (1 << 2) #define DTD_FLAG_INTERLACE (1 << 7) -/** This matches the EDID DTD structure, more or less */ +/* This matches the EDID DTD structure, more or less */ struct intel_sdvo_dtd { struct { - u16 clock; /**< pixel clock, in 10kHz units */ - u8 h_active; /**< lower 8 bits (pixels) */ - u8 h_blank; /**< lower 8 bits (pixels) */ - u8 h_high; /**< upper 4 bits each h_active, h_blank */ - u8 v_active; /**< lower 8 bits (lines) */ - u8 v_blank; /**< lower 8 bits (lines) */ - u8 v_high; /**< upper 4 bits each v_active, v_blank */ + u16 clock; /* pixel clock, in 10kHz units */ + u8 h_active; /* lower 8 bits (pixels) */ + u8 h_blank; /* lower 8 bits (pixels) */ + u8 h_high; /* upper 4 bits each h_active, h_blank */ + u8 v_active; /* lower 8 bits (lines) */ + u8 v_blank; /* lower 8 bits (lines) */ + u8 v_high; /* upper 4 bits each v_active, v_blank */ } part1; struct { - u8 h_sync_off; /**< lower 8 bits, from hblank start */ - u8 h_sync_width; /**< lower 8 bits (pixels) */ - /** lower 4 bits each vsync offset, vsync width */ + u8 h_sync_off; /* lower 8 bits, from hblank start */ + u8 h_sync_width; /* lower 8 bits (pixels) */ + /* lower 4 bits each vsync offset, vsync width */ u8 v_sync_off_width; - /** + /* * 2 high bits of hsync offset, 2 high bits of hsync width, * bits 4-5 of vsync offset, and 2 high bits of vsync width. */ u8 sync_off_width_high; u8 dtd_flags; u8 sdvo_flags; - /** bits 6-7 of vsync offset at bits 6-7 */ + /* bits 6-7 of vsync offset at bits 6-7 */ u8 v_sync_off_high; u8 reserved; } part2; } __packed; struct intel_sdvo_pixel_clock_range { - u16 min; /**< pixel clock, in 10kHz units */ - u16 max; /**< pixel clock, in 10kHz units */ + u16 min; /* pixel clock, in 10kHz units */ + u16 max; /* pixel clock, in 10kHz units */ } __packed; struct intel_sdvo_preferred_input_timing_args { @@ -144,7 +144,7 @@ struct intel_sdvo_preferred_input_timing_args { #define SDVO_CMD_RESET 0x01 -/** Returns a struct intel_sdvo_caps */ +/* Returns a struct intel_sdvo_caps */ #define SDVO_CMD_GET_DEVICE_CAPS 0x02 #define SDVO_CMD_GET_FIRMWARE_REV 0x86 @@ -152,7 +152,7 @@ struct intel_sdvo_preferred_input_timing_args { # define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1 # define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2 -/** +/* * Reports which inputs are trained (managed to sync). * * Devices must have trained within 2 vsyncs of a mode change. @@ -164,10 +164,10 @@ struct intel_sdvo_get_trained_inputs_response { unsigned int pad:6; } __packed; -/** Returns a struct intel_sdvo_output_flags of active outputs. */ +/* Returns a struct intel_sdvo_output_flags of active outputs. */ #define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04 -/** +/* * Sets the current set of active outputs. * * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP @@ -175,7 +175,7 @@ struct intel_sdvo_get_trained_inputs_response { */ #define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05 -/** +/* * Returns the current mapping of SDVO inputs to outputs on the device. * * Returns two struct intel_sdvo_output_flags structures. @@ -185,29 +185,29 @@ struct intel_sdvo_in_out_map { u16 in0, in1; }; -/** +/* * Sets the current mapping of SDVO inputs to outputs on the device. * * Takes two struct i380_sdvo_output_flags structures. */ #define SDVO_CMD_SET_IN_OUT_MAP 0x07 -/** +/* * Returns a struct intel_sdvo_output_flags of attached displays. 
*/ #define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b -/** +/* * Returns a struct intel_sdvo_ouptut_flags of displays supporting hot plugging. */ #define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c -/** +/* * Takes a struct intel_sdvo_output_flags. */ #define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d -/** +/* * Returns a struct intel_sdvo_output_flags of displays with hot plug * interrupts enabled. */ @@ -221,7 +221,7 @@ struct intel_sdvo_get_interrupt_event_source_response { unsigned int pad:6; } __packed; -/** +/* * Selects which input is affected by future input commands. * * Commands affected include SET_INPUT_TIMINGS_PART[12], @@ -234,7 +234,7 @@ struct intel_sdvo_set_target_input_args { unsigned int pad:7; } __packed; -/** +/* * Takes a struct intel_sdvo_output_flags of which outputs are targeted by * future output commands. * @@ -280,7 +280,7 @@ struct intel_sdvo_set_target_input_args { # define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4) # define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6 -/** +/* * Generates a DTD based on the given width, height, and flags. * * This will be supported by any device supporting scaling or interlaced @@ -300,24 +300,24 @@ struct intel_sdvo_set_target_input_args { #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c -/** Returns a struct intel_sdvo_pixel_clock_range */ +/* Returns a struct intel_sdvo_pixel_clock_range */ #define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d -/** Returns a struct intel_sdvo_pixel_clock_range */ +/* Returns a struct intel_sdvo_pixel_clock_range */ #define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e -/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */ +/* Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */ #define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f -/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ +/* Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ #define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20 -/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ +/* Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ #define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21 # define SDVO_CLOCK_RATE_MULT_1X (1 << 0) # define SDVO_CLOCK_RATE_MULT_2X (1 << 1) # define SDVO_CLOCK_RATE_MULT_4X (1 << 3) #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 -/** 6 bytes of bit flags for TV formats shared by all TV format functions */ +/* 6 bytes of bit flags for TV formats shared by all TV format functions */ struct intel_sdvo_tv_format { unsigned int ntsc_m:1; unsigned int ntsc_j:1; @@ -376,7 +376,7 @@ struct intel_sdvo_tv_format { #define SDVO_CMD_SET_TV_FORMAT 0x29 -/** Returns the resolutiosn that can be used with the given TV format */ +/* Returns the resolutiosn that can be used with the given TV format */ #define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83 struct intel_sdvo_sdtv_resolution_request { unsigned int ntsc_m:1; @@ -539,7 +539,7 @@ struct intel_sdvo_hdtv_resolution_reply { #define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d #define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e #define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f -/** +/* * The panel power sequencing parameters are in units of milliseconds. * The high fields are bits 8:9 of the 10-bit values. 
*/ diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 8831fc579ade..c3998188cf35 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -129,17 +129,18 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr) return val; } -u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg) +u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port, SB_CRRDDA_NP, reg, &val); return val; } -void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) +void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, + u8 port, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port, SB_CRWRDA_NP, reg, &val); } @@ -171,20 +172,6 @@ void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) SB_CRWRDA_NP, reg, &val); } -u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg) -{ - u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE, - SB_CRRDDA_NP, reg, &val); - return val; -} - -void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) -{ - vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE, - SB_CRWRDA_NP, reg, &val); -} - u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = 0; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 4ff7a1f4183e..a2582c455b36 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -178,28 +178,33 @@ void intel_pipe_update_end(struct intel_crtc *crtc) } static void -skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t x, uint32_t y, - uint32_t src_w, uint32_t src_h) +skl_update_plane(struct drm_plane *drm_plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_device *dev = drm_plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(drm_plane); + struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); const int pipe = intel_plane->pipe; const int plane = intel_plane->plane + 1; u32 plane_ctl, stride_div, stride; - const struct drm_intel_sprite_colorkey *key = - &to_intel_plane_state(drm_plane->state)->ckey; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 surf_addr; u32 tile_height, plane_offset, plane_size; unsigned int rotation; int x_offset, y_offset; - struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config; - int scaler_id; + int crtc_x = plane_state->dst.x1; + int crtc_y = plane_state->dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->dst); + uint32_t crtc_h = drm_rect_height(&plane_state->dst); + uint32_t x = plane_state->src.x1 >> 16; + uint32_t y = plane_state->src.y1 >> 16; + uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; + const struct intel_scaler *scaler = + &crtc_state->scaler_state.scalers[plane_state->scaler_id]; plane_ctl = PLANE_CTL_ENABLE | PLANE_CTL_PIPE_GAMMA_ENABLE | @@ -208,14 +213,12 @@ skl_update_plane(struct drm_plane *drm_plane, struct 
drm_crtc *crtc, plane_ctl |= skl_plane_ctl_format(fb->pixel_format); plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]); - rotation = drm_plane->state->rotation; + rotation = plane_state->base.rotation; plane_ctl |= skl_plane_ctl_rotation(rotation); - stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], + stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0], fb->pixel_format); - scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id; - /* Sizes are 0 based */ src_w--; src_h--; @@ -236,9 +239,10 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, surf_addr = intel_plane_obj_offset(intel_plane, obj, 0); if (intel_rotation_90_or_270(rotation)) { + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + /* stride: Surface height in tiles */ - tile_height = intel_tile_height(dev, fb->pixel_format, - fb->modifier[0], 0); + tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp); stride = DIV_ROUND_UP(fb->height, tile_height); plane_size = (src_w << 16) | src_h; x_offset = stride * tile_height - y - (src_h + 1); @@ -256,13 +260,13 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, I915_WRITE(PLANE_SIZE(pipe, plane), plane_size); /* program plane scaler */ - if (scaler_id >= 0) { + if (plane_state->scaler_id >= 0) { uint32_t ps_ctrl = 0; + int scaler_id = plane_state->scaler_id; DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane, PS_PLANE_SEL(plane)); - ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | - crtc_state->scaler_state.scalers[scaler_id].mode; + ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode; I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); @@ -334,24 +338,29 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format) } static void -vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t x, uint32_t y, - uint32_t src_w, uint32_t src_h) +vlv_update_plane(struct drm_plane *dplane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_device *dev = dplane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(dplane); + struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_plane->pipe; int plane = intel_plane->plane; u32 sprctl; - unsigned long sprsurf_offset, linear_offset; - int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); - const struct drm_intel_sprite_colorkey *key = - &to_intel_plane_state(dplane->state)->ckey; + u32 sprsurf_offset, linear_offset; + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + int crtc_x = plane_state->dst.x1; + int crtc_y = plane_state->dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->dst); + uint32_t crtc_h = drm_rect_height(&plane_state->dst); + uint32_t x = plane_state->src.x1 >> 16; + uint32_t y = plane_state->src.y1 >> 16; + uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; sprctl = SP_ENABLE; @@ -413,20 +422,18 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, crtc_w--; crtc_h--; - linear_offset = y * fb->pitches[0] + x * pixel_size; - sprsurf_offset = 
intel_gen4_compute_page_offset(dev_priv, - &x, &y, - obj->tiling_mode, - pixel_size, - fb->pitches[0]); + linear_offset = y * fb->pitches[0] + x * cpp; + sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y, + fb->modifier[0], cpp, + fb->pitches[0]); linear_offset -= sprsurf_offset; - if (dplane->state->rotation == BIT(DRM_ROTATE_180)) { + if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { sprctl |= SP_ROTATE_180; x += src_w; y += src_h; - linear_offset += src_h * fb->pitches[0] + src_w * pixel_size; + linear_offset += src_h * fb->pitches[0] + src_w * cpp; } if (key->flags) { @@ -474,23 +481,28 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) } static void -ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t x, uint32_t y, - uint32_t src_w, uint32_t src_h) +ivb_update_plane(struct drm_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(plane); + struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); enum pipe pipe = intel_plane->pipe; u32 sprctl, sprscale = 0; - unsigned long sprsurf_offset, linear_offset; - int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); - const struct drm_intel_sprite_colorkey *key = - &to_intel_plane_state(plane->state)->ckey; + u32 sprsurf_offset, linear_offset; + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + int crtc_x = plane_state->dst.x1; + int crtc_y = plane_state->dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->dst); + uint32_t crtc_h = drm_rect_height(&plane_state->dst); + uint32_t x = plane_state->src.x1 >> 16; + uint32_t y = plane_state->src.y1 >> 16; + uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; sprctl = SPRITE_ENABLE; @@ -543,22 +555,20 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (crtc_w != src_w || crtc_h != src_h) sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; - linear_offset = y * fb->pitches[0] + x * pixel_size; - sprsurf_offset = - intel_gen4_compute_page_offset(dev_priv, - &x, &y, obj->tiling_mode, - pixel_size, fb->pitches[0]); + linear_offset = y * fb->pitches[0] + x * cpp; + sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y, + fb->modifier[0], cpp, + fb->pitches[0]); linear_offset -= sprsurf_offset; - if (plane->state->rotation == BIT(DRM_ROTATE_180)) { + if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { sprctl |= SPRITE_ROTATE_180; /* HSW and BDW does this automagically in hardware */ if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { x += src_w; y += src_h; - linear_offset += src_h * fb->pitches[0] + - src_w * pixel_size; + linear_offset += src_h * fb->pitches[0] + src_w * cpp; } } @@ -612,23 +622,28 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) } static void -ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t x, uint32_t y, - uint32_t src_w, uint32_t src_h) +ilk_update_plane(struct drm_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) 
{ struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(plane); + struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_plane->pipe; - unsigned long dvssurf_offset, linear_offset; u32 dvscntr, dvsscale; - int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); - const struct drm_intel_sprite_colorkey *key = - &to_intel_plane_state(plane->state)->ckey; + u32 dvssurf_offset, linear_offset; + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + int crtc_x = plane_state->dst.x1; + int crtc_y = plane_state->dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->dst); + uint32_t crtc_h = drm_rect_height(&plane_state->dst); + uint32_t x = plane_state->src.x1 >> 16; + uint32_t y = plane_state->src.y1 >> 16; + uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; dvscntr = DVS_ENABLE; @@ -677,19 +692,18 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (crtc_w != src_w || crtc_h != src_h) dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; - linear_offset = y * fb->pitches[0] + x * pixel_size; - dvssurf_offset = - intel_gen4_compute_page_offset(dev_priv, - &x, &y, obj->tiling_mode, - pixel_size, fb->pitches[0]); + linear_offset = y * fb->pitches[0] + x * cpp; + dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y, + fb->modifier[0], cpp, + fb->pitches[0]); linear_offset -= dvssurf_offset; - if (plane->state->rotation == BIT(DRM_ROTATE_180)) { + if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { dvscntr |= DVS_ROTATE_180; x += src_w; y += src_h; - linear_offset += src_h * fb->pitches[0] + src_w * pixel_size; + linear_offset += src_h * fb->pitches[0] + src_w * cpp; } if (key->flags) { @@ -754,7 +768,6 @@ intel_check_sprite_plane(struct drm_plane *plane, int hscale, vscale; int max_scale, min_scale; bool can_scale; - int pixel_size; if (!fb) { state->visible = false; @@ -876,6 +889,7 @@ intel_check_sprite_plane(struct drm_plane *plane, /* Check size restrictions when scaling */ if (state->visible && (src_w != crtc_w || src_h != crtc_h)) { unsigned int width_bytes; + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); WARN_ON(!can_scale); @@ -887,9 +901,7 @@ intel_check_sprite_plane(struct drm_plane *plane, if (src_w < 3 || src_h < 3) state->visible = false; - pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); - width_bytes = ((src_x * pixel_size) & 63) + - src_w * pixel_size; + width_bytes = ((src_x * cpp) & 63) + src_w * cpp; if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 || width_bytes > 4096 || fb->pitches[0] > 4096)) { @@ -913,30 +925,6 @@ intel_check_sprite_plane(struct drm_plane *plane, return 0; } -static void -intel_commit_sprite_plane(struct drm_plane *plane, - struct intel_plane_state *state) -{ - struct drm_crtc *crtc = state->base.crtc; - struct intel_plane *intel_plane = to_intel_plane(plane); - struct drm_framebuffer *fb = state->base.fb; - - crtc = crtc ? 
crtc : plane->crtc; - - if (state->visible) { - intel_plane->update_plane(plane, crtc, fb, - state->dst.x1, state->dst.y1, - drm_rect_width(&state->dst), - drm_rect_height(&state->dst), - state->src.x1 >> 16, - state->src.y1 >> 16, - drm_rect_width(&state->src) >> 16, - drm_rect_height(&state->src) >> 16); - } else { - intel_plane->disable_plane(plane, crtc); - } -} - int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -1118,7 +1106,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) intel_plane->plane = plane; intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane); intel_plane->check_plane = intel_check_sprite_plane; - intel_plane->commit_plane = intel_commit_sprite_plane; possible_crtcs = (1 << pipe); ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, &intel_plane_funcs, diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 948cbff6c62e..d21f75bda96e 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -897,6 +897,10 @@ intel_tv_mode_valid(struct drm_connector *connector, { struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); + int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + + if (mode->clock > max_dotclk) + return MODE_CLOCK_HIGH; /* Ensure TV refresh is close to desired refresh */ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) @@ -1420,6 +1424,7 @@ intel_tv_get_modes(struct drm_connector *connector) if (!mode_ptr) continue; strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); + mode_ptr->name[DRM_DISPLAY_MODE_LEN - 1] = '\0'; mode_ptr->hdisplay = hactive_s; mode_ptr->hsync_start = hactive_s + 1; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 277e60ae0e47..436d8f2b8682 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -327,13 +327,54 @@ static void intel_uncore_ellc_detect(struct drm_device *dev) } } +static bool +fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) +{ + u32 dbg; + + dbg = __raw_i915_read32(dev_priv, FPGA_DBG); + if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM))) + return false; + + __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + + return true; +} + +static bool +vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) +{ + u32 cer; + + cer = __raw_i915_read32(dev_priv, CLAIM_ER); + if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK)))) + return false; + + __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR); + + return true; +} + +static bool +check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) +{ + if (HAS_FPGA_DBG_UNCLAIMED(dev_priv)) + return fpga_check_for_unclaimed_mmio(dev_priv); + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + return vlv_check_for_unclaimed_mmio(dev_priv); + + return false; +} + static void __intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) { struct drm_i915_private *dev_priv = dev->dev_private; - if (HAS_FPGA_DBG_UNCLAIMED(dev)) - __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + /* clear out unclaimed reg detection bit */ + if (check_for_unclaimed_mmio(dev_priv)) + DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); /* clear out old GT FIFO errors */ if (IS_GEN6(dev) || IS_GEN7(dev)) @@ -359,6 +400,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool 
restore_forcewake) void intel_uncore_sanitize(struct drm_device *dev) { + i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); + /* BIOS often leaves RC6 enabled, but disable it for hw init */ intel_disable_gt_powersave(dev); } @@ -585,38 +628,38 @@ ilk_dummy_write(struct drm_i915_private *dev_priv) } static void -hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, - i915_reg_t reg, bool read, bool before) +__unclaimed_reg_debug(struct drm_i915_private *dev_priv, + const i915_reg_t reg, + const bool read, + const bool before) { - const char *op = read ? "reading" : "writing to"; - const char *when = before ? "before" : "after"; - - if (!i915.mmio_debug) + /* XXX. We limit the auto arming traces for mmio + * debugs on these platforms. There are just too many + * revealed by these and CI/Bat suffers from the noise. + * Please fix and then re-enable the automatic traces. + */ + if (i915.mmio_debug < 2 && + (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) return; - if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { - WARN(1, "Unclaimed register detected %s %s register 0x%x\n", - when, op, i915_mmio_reg_offset(reg)); - __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + if (WARN(check_for_unclaimed_mmio(dev_priv), + "Unclaimed register detected %s %s register 0x%x\n", + before ? "before" : "after", + read ? "reading" : "writing to", + i915_mmio_reg_offset(reg))) i915.mmio_debug--; /* Only report the first N failures */ - } } -static void -hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) +static inline void +unclaimed_reg_debug(struct drm_i915_private *dev_priv, + const i915_reg_t reg, + const bool read, + const bool before) { - static bool mmio_debug_once = true; - - if (i915.mmio_debug || !mmio_debug_once) + if (likely(!i915.mmio_debug)) return; - if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { - DRM_DEBUG("Unclaimed register detected, " - "enabling oneshot unclaimed register reporting. 
" - "Please use i915.mmio_debug=N for more information.\n"); - __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); - i915.mmio_debug = mmio_debug_once--; - } + __unclaimed_reg_debug(dev_priv, reg, read, before); } #define GEN2_READ_HEADER(x) \ @@ -664,9 +707,11 @@ __gen2_read(64) unsigned long irqflags; \ u##x val = 0; \ assert_rpm_wakelock_held(dev_priv); \ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ + unclaimed_reg_debug(dev_priv, reg, true, true) #define GEN6_READ_FOOTER \ + unclaimed_reg_debug(dev_priv, reg, true, false); \ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ return val @@ -699,11 +744,9 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv, static u##x \ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ GEN6_READ_HEADER(x); \ - hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ if (NEEDS_FORCE_WAKE(offset)) \ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ val = __raw_i915_read##x(dev_priv, reg); \ - hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \ GEN6_READ_FOOTER; \ } @@ -751,7 +794,6 @@ static u##x \ gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_READ_HEADER(x); \ - hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ if (!SKL_NEEDS_FORCE_WAKE(offset)) \ fw_engine = 0; \ else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \ @@ -765,7 +807,6 @@ gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ if (fw_engine) \ __force_wake_get(dev_priv, fw_engine); \ val = __raw_i915_read##x(dev_priv, reg); \ - hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \ GEN6_READ_FOOTER; \ } @@ -864,9 +905,11 @@ __gen2_write(64) unsigned long irqflags; \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ assert_rpm_wakelock_held(dev_priv); \ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ + unclaimed_reg_debug(dev_priv, reg, false, true) #define GEN6_WRITE_FOOTER \ + unclaimed_reg_debug(dev_priv, reg, false, false); \ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags) #define __gen6_write(x) \ @@ -892,13 +935,10 @@ hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool t if (NEEDS_FORCE_WAKE(offset)) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ - hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ __raw_i915_write##x(dev_priv, reg, val); \ if (unlikely(__fifo_ret)) { \ gen6_gt_check_fifodbg(dev_priv); \ } \ - hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \ - hsw_unclaimed_reg_detect(dev_priv); \ GEN6_WRITE_FOOTER; \ } @@ -928,12 +968,9 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, static void \ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ GEN6_WRITE_HEADER; \ - hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ __raw_i915_write##x(dev_priv, reg, val); \ - hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \ - hsw_unclaimed_reg_detect(dev_priv); \ GEN6_WRITE_FOOTER; \ } @@ -987,7 +1024,6 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \ bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_WRITE_HEADER; \ - hsw_unclaimed_reg_debug(dev_priv, reg, 
false, true); \ if (!SKL_NEEDS_FORCE_WAKE(offset) || \ is_gen9_shadowed(dev_priv, reg)) \ fw_engine = 0; \ @@ -1002,8 +1038,6 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \ if (fw_engine) \ __force_wake_get(dev_priv, fw_engine); \ __raw_i915_write##x(dev_priv, reg, val); \ - hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \ - hsw_unclaimed_reg_detect(dev_priv); \ GEN6_WRITE_FOOTER; \ } @@ -1223,6 +1257,8 @@ void intel_uncore_init(struct drm_device *dev) intel_uncore_fw_domains_init(dev); __intel_uncore_early_sanitize(dev, false); + dev_priv->uncore.unclaimed_mmio_check = 1; + switch (INTEL_INFO(dev)->gen) { default: case 9: @@ -1580,13 +1616,26 @@ bool intel_has_gpu_reset(struct drm_device *dev) return intel_get_gpu_reset(dev) != NULL; } -void intel_uncore_check_errors(struct drm_device *dev) +bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + return check_for_unclaimed_mmio(dev_priv); +} + +bool +intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) +{ + if (unlikely(i915.mmio_debug || + dev_priv->uncore.unclaimed_mmio_check <= 0)) + return false; - if (HAS_FPGA_DBG_UNCLAIMED(dev) && - (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { - DRM_ERROR("Unclaimed register before interrupt\n"); - __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) { + DRM_DEBUG("Unclaimed register detected, " + "enabling oneshot unclaimed register reporting. " + "Please use i915.mmio_debug=N for more information.\n"); + i915.mmio_debug++; + dev_priv->uncore.unclaimed_mmio_check--; + return true; } + + return false; } diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 063825fecbe2..2a95d10e9d92 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -109,13 +109,6 @@ static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder) { } -static bool dw_hdmi_imx_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adj_mode) -{ - return true; -} - static void dw_hdmi_imx_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adj_mode) @@ -125,7 +118,7 @@ static void dw_hdmi_imx_encoder_mode_set(struct drm_encoder *encoder, static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder) { struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder); - int mux = imx_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder); + int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder); regmap_update_bits(hdmi->regmap, IOMUXC_GPR3, IMX6Q_GPR3_HDMI_MUX_CTL_MASK, @@ -138,7 +131,6 @@ static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder) } static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = { - .mode_fixup = dw_hdmi_imx_encoder_mode_fixup, .mode_set = dw_hdmi_imx_encoder_mode_set, .prepare = dw_hdmi_imx_encoder_prepare, .commit = dw_hdmi_imx_encoder_commit, diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 2f57d7967417..9876e0f0c3e1 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -17,7 +17,6 @@ #include <linux/device.h> #include <linux/fb.h> #include <linux/module.h> -#include <linux/of_graph.h> #include <linux/platform_device.h> #include <drm/drmP.h> #include <drm/drm_fb_helper.h> @@ 
-171,18 +170,6 @@ static void imx_drm_disable_vblank(struct drm_device *drm, unsigned int pipe) imx_drm_crtc->imx_drm_helper_funcs.disable_vblank(imx_drm_crtc->crtc); } -static void imx_drm_driver_preclose(struct drm_device *drm, - struct drm_file *file) -{ - int i; - - if (!file->is_master) - return; - - for (i = 0; i < MAX_CRTC; i++) - imx_drm_disable_vblank(drm, i); -} - static const struct file_operations imx_drm_driver_fops = { .owner = THIS_MODULE, .open = drm_open, @@ -424,36 +411,6 @@ int imx_drm_encoder_parse_of(struct drm_device *drm, } EXPORT_SYMBOL_GPL(imx_drm_encoder_parse_of); -/* - * @node: device tree node containing encoder input ports - * @encoder: drm_encoder - */ -int imx_drm_encoder_get_mux_id(struct device_node *node, - struct drm_encoder *encoder) -{ - struct imx_drm_crtc *imx_crtc = imx_drm_find_crtc(encoder->crtc); - struct device_node *ep; - struct of_endpoint endpoint; - struct device_node *port; - int ret; - - if (!node || !imx_crtc) - return -EINVAL; - - for_each_endpoint_of_node(node, ep) { - port = of_graph_get_remote_port(ep); - of_node_put(port); - if (port == imx_crtc->crtc->port) { - ret = of_graph_parse_endpoint(ep, &endpoint); - of_node_put(ep); - return ret ? ret : endpoint.port; - } - } - - return -EINVAL; -} -EXPORT_SYMBOL_GPL(imx_drm_encoder_get_mux_id); - static const struct drm_ioctl_desc imx_drm_ioctls[] = { /* none so far */ }; @@ -463,7 +420,6 @@ static struct drm_driver imx_drm_driver = { .load = imx_drm_driver_load, .unload = imx_drm_driver_unload, .lastclose = imx_drm_driver_lastclose, - .preclose = imx_drm_driver_preclose, .set_busid = drm_platform_set_busid, .gem_free_object = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index 71cf6d9c714f..b0241b9d1334 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h @@ -46,8 +46,6 @@ int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format); -int imx_drm_encoder_get_mux_id(struct device_node *node, - struct drm_encoder *encoder); int imx_drm_encoder_parse_of(struct drm_device *drm, struct drm_encoder *encoder, struct device_node *np); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 22ac482231ed..a58eee59550a 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -19,6 +19,7 @@ #include <drm/drmP.h> #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_of.h> #include <drm/drm_panel.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> @@ -139,13 +140,6 @@ static void imx_ldb_encoder_dpms(struct drm_encoder *encoder, int mode) { } -static bool imx_ldb_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno, unsigned long serial_clk, unsigned long di_clk) { @@ -215,7 +209,7 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder) struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); struct imx_ldb *ldb = imx_ldb_ch->ldb; int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; - int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); + int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); drm_panel_prepare(imx_ldb_ch->panel); @@ -265,7 +259,7 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder 
*encoder, int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; unsigned long serial_clk; unsigned long di_clk = mode->clock * 1000; - int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); + int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); if (mode->clock > 170000) { dev_warn(ldb->dev, @@ -376,7 +370,6 @@ static const struct drm_encoder_funcs imx_ldb_encoder_funcs = { static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = { .dpms = imx_ldb_encoder_dpms, - .mode_fixup = imx_ldb_encoder_mode_fixup, .prepare = imx_ldb_encoder_prepare, .commit = imx_ldb_encoder_commit, .mode_set = imx_ldb_encoder_mode_set, diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 292349f0b132..ae7a9fb3b8a2 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -286,13 +286,6 @@ static void imx_tve_encoder_dpms(struct drm_encoder *encoder, int mode) dev_err(tve->dev, "failed to disable TVOUT: %d\n", ret); } -static bool imx_tve_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void imx_tve_encoder_prepare(struct drm_encoder *encoder) { struct imx_tve *tve = enc_to_tve(encoder); @@ -379,7 +372,6 @@ static const struct drm_encoder_funcs imx_tve_encoder_funcs = { static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = { .dpms = imx_tve_encoder_dpms, - .mode_fixup = imx_tve_encoder_mode_fixup, .prepare = imx_tve_encoder_prepare, .mode_set = imx_tve_encoder_mode_set, .commit = imx_tve_encoder_commit, diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 30a57185bdb4..4bea0ddb3f3e 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -22,6 +22,8 @@ #include <linux/fb.h> #include <linux/clk.h> #include <linux/errno.h> +#include <linux/reservation.h> +#include <linux/dma-buf.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> @@ -31,6 +33,23 @@ #define DRIVER_DESC "i.MX IPUv3 Graphics" +enum ipu_flip_status { + IPU_FLIP_NONE, + IPU_FLIP_PENDING, + IPU_FLIP_SUBMITTED, +}; + +struct ipu_flip_work { + struct work_struct unref_work; + struct drm_gem_object *bo; + struct drm_pending_vblank_event *page_flip_event; + struct work_struct fence_work; + struct ipu_crtc *crtc; + struct fence *excl; + unsigned shared_count; + struct fence **shared; +}; + struct ipu_crtc { struct device *dev; struct drm_crtc base; @@ -42,8 +61,9 @@ struct ipu_crtc { struct ipu_dc *dc; struct ipu_di *di; int enabled; - struct drm_pending_vblank_event *page_flip_event; - struct drm_framebuffer *newfb; + enum ipu_flip_status flip_state; + struct workqueue_struct *flip_queue; + struct ipu_flip_work *flip_work; int irq; u32 bus_format; int di_hsync_pin; @@ -102,15 +122,45 @@ static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode) } } +static void ipu_flip_unref_work_func(struct work_struct *__work) +{ + struct ipu_flip_work *work = + container_of(__work, struct ipu_flip_work, unref_work); + + drm_gem_object_unreference_unlocked(work->bo); + kfree(work); +} + +static void ipu_flip_fence_work_func(struct work_struct *__work) +{ + struct ipu_flip_work *work = + container_of(__work, struct ipu_flip_work, fence_work); + int i; + + /* wait for all fences attached to the FB obj to signal */ + if (work->excl) { + fence_wait(work->excl, false); + fence_put(work->excl); + } + for (i = 0; i < work->shared_count; i++) { + fence_wait(work->shared[i], 
false); + fence_put(work->shared[i]); + } + + work->crtc->flip_state = IPU_FLIP_SUBMITTED; +} + static int ipu_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags) { + struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); + struct ipu_flip_work *flip_work; int ret; - if (ipu_crtc->newfb) + if (ipu_crtc->flip_state != IPU_FLIP_NONE) return -EBUSY; ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc); @@ -121,11 +171,58 @@ static int ipu_page_flip(struct drm_crtc *crtc, return ret; } - ipu_crtc->newfb = fb; - ipu_crtc->page_flip_event = event; - crtc->primary->fb = fb; + flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL); + if (!flip_work) { + ret = -ENOMEM; + goto put_vblank; + } + INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func); + flip_work->page_flip_event = event; + + /* get BO backing the old framebuffer and take a reference */ + flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base; + drm_gem_object_reference(flip_work->bo); + + ipu_crtc->flip_work = flip_work; + /* + * If the object has a DMABUF attached, we need to wait on its fences + * if there are any. + */ + if (cma_obj->base.dma_buf) { + INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func); + flip_work->crtc = ipu_crtc; + + ret = reservation_object_get_fences_rcu( + cma_obj->base.dma_buf->resv, &flip_work->excl, + &flip_work->shared_count, &flip_work->shared); + + if (unlikely(ret)) { + DRM_ERROR("failed to get fences for buffer\n"); + goto free_flip_work; + } + + /* No need to queue the worker if there are no fences */ + if (!flip_work->excl && !flip_work->shared_count) { + ipu_crtc->flip_state = IPU_FLIP_SUBMITTED; + } else { + ipu_crtc->flip_state = IPU_FLIP_PENDING; + queue_work(ipu_crtc->flip_queue, + &flip_work->fence_work); + } + } else { + ipu_crtc->flip_state = IPU_FLIP_SUBMITTED; + } return 0; + +free_flip_work: + drm_gem_object_unreference_unlocked(flip_work->bo); + kfree(flip_work); + ipu_crtc->flip_work = NULL; +put_vblank: + imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); + + return ret; } static const struct drm_crtc_funcs ipu_crtc_funcs = { @@ -209,12 +306,12 @@ static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc) { unsigned long flags; struct drm_device *drm = ipu_crtc->base.dev; + struct ipu_flip_work *work = ipu_crtc->flip_work; spin_lock_irqsave(&drm->event_lock, flags); - if (ipu_crtc->page_flip_event) + if (work->page_flip_event) drm_crtc_send_vblank_event(&ipu_crtc->base, - ipu_crtc->page_flip_event); - ipu_crtc->page_flip_event = NULL; + work->page_flip_event); imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); spin_unlock_irqrestore(&drm->event_lock, flags); } @@ -225,13 +322,15 @@ static irqreturn_t ipu_irq_handler(int irq, void *dev_id) imx_drm_handle_vblank(ipu_crtc->imx_crtc); - if (ipu_crtc->newfb) { + if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) { struct ipu_plane *plane = ipu_crtc->plane[0]; - ipu_crtc->newfb = NULL; ipu_plane_set_base(plane, ipu_crtc->base.primary->fb, plane->x, plane->y); ipu_crtc_handle_pageflip(ipu_crtc); + queue_work(ipu_crtc->flip_queue, + &ipu_crtc->flip_work->unref_work); + ipu_crtc->flip_state = IPU_FLIP_NONE; } return IRQ_HANDLED; @@ -280,6 +379,10 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = { static int ipu_enable_vblank(struct drm_crtc *crtc) { + struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); + + enable_irq(ipu_crtc->irq); + return 0; } @@ -287,8 +390,7 @@ static void 
ipu_disable_vblank(struct drm_crtc *crtc) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); - ipu_crtc->page_flip_event = NULL; - ipu_crtc->newfb = NULL; + disable_irq_nosync(ipu_crtc->irq); } static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, @@ -399,6 +501,10 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); goto err_put_plane_res; } + /* Only enable IRQ when we actually need it to trigger work. */ + disable_irq(ipu_crtc->irq); + + ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip"); return 0; @@ -441,6 +547,7 @@ static void ipu_drm_unbind(struct device *dev, struct device *master, imx_drm_remove_crtc(ipu_crtc->imx_crtc); + destroy_workqueue(ipu_crtc->flip_queue); ipu_plane_put_resources(ipu_crtc->plane[0]); ipu_put_resources(ipu_crtc); } diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 591ba2f1ae03..d078373c4233 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -338,7 +338,7 @@ static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, } if (crtc != plane->crtc) - dev_info(plane->dev->dev, "crtc change: %p -> %p\n", + dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n", plane->crtc, crtc); plane->crtc = crtc; diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 0ffef172afb4..363e2c7741e2 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -112,13 +112,6 @@ static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode) drm_panel_enable(imxpd->panel); } -static bool imx_pd_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void imx_pd_encoder_prepare(struct drm_encoder *encoder) { struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); @@ -166,7 +159,6 @@ static const struct drm_encoder_funcs imx_pd_encoder_funcs = { static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = { .dpms = imx_pd_encoder_dpms, - .mode_fixup = imx_pd_encoder_mode_fixup, .prepare = imx_pd_encoder_prepare, .commit = imx_pd_encoder_commit, .mode_set = imx_pd_encoder_mode_set, diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index dc13c4857e6f..af8b4c19cf15 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1479,13 +1479,6 @@ void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, * These functions are analogous to those in the CRTC code, but are intended * to handle any encoder-specific limitations */ -static bool mga_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void mga_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -1515,7 +1508,6 @@ static void mga_encoder_destroy(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = { .dpms = mga_encoder_dpms, - .mode_fixup = mga_encoder_mode_fixup, .mode_set = mga_encoder_mode_set, .prepare = mga_encoder_prepare, .commit = mga_encoder_commit, diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 065ad4138799..ddb4c9d097e4 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ 
-12,6 +12,7 @@ msm-y := \ hdmi/hdmi_connector.o \ hdmi/hdmi_hdcp.o \ hdmi/hdmi_i2c.o \ + hdmi/hdmi_phy.o \ hdmi/hdmi_phy_8960.o \ hdmi/hdmi_phy_8x60.o \ hdmi/hdmi_phy_8x74.o \ @@ -52,6 +53,8 @@ msm-y := \ msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o +msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o +msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ mdp/mdp4/mdp4_dsi_encoder.o \ diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h index 9e2aceb4ffe6..fee24297fb92 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h @@ -9,16 +9,17 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48) - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h index 97dc1c6ec107..27dabd5e57fb 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h @@ -9,16 +9,17 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) -- 
/home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48) - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) -Copyright (C) 2013-2015 by the following authors: +Copyright (C) 2013-2016 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the @@ -111,10 +112,14 @@ enum a3xx_vtx_fmt { VFMT_8_8_SNORM = 53, VFMT_8_8_8_SNORM = 54, VFMT_8_8_8_8_SNORM = 55, - VFMT_10_10_10_2_UINT = 60, - VFMT_10_10_10_2_UNORM = 61, - VFMT_10_10_10_2_SINT = 62, - VFMT_10_10_10_2_SNORM = 63, + VFMT_10_10_10_2_UINT = 56, + VFMT_10_10_10_2_UNORM = 57, + VFMT_10_10_10_2_SINT = 58, + VFMT_10_10_10_2_SNORM = 59, + VFMT_2_10_10_10_UINT = 60, + VFMT_2_10_10_10_UNORM = 61, + VFMT_2_10_10_10_SINT = 62, + VFMT_2_10_10_10_SNORM = 63, }; enum a3xx_tex_fmt { @@ -138,10 +143,12 @@ enum a3xx_tex_fmt { TFMT_DXT1 = 36, TFMT_DXT3 = 37, TFMT_DXT5 = 38, + TFMT_2_10_10_10_UNORM = 40, TFMT_10_10_10_2_UNORM = 41, TFMT_9_9_9_E5_FLOAT = 42, TFMT_11_11_10_FLOAT = 43, TFMT_A8_UNORM = 44, + TFMT_L8_UNORM = 45, TFMT_L8_A8_UNORM = 47, TFMT_8_UNORM = 48, TFMT_8_8_UNORM = 49, @@ -183,6 +190,8 @@ enum a3xx_tex_fmt { TFMT_32_SINT = 92, TFMT_32_32_SINT = 93, TFMT_32_32_32_32_SINT = 95, + TFMT_2_10_10_10_UINT = 96, + TFMT_10_10_10_2_UINT = 97, TFMT_ETC2_RG11_SNORM = 112, TFMT_ETC2_RG11_UNORM = 113, TFMT_ETC2_R11_SNORM = 114, @@ -215,6 +224,9 @@ enum a3xx_color_fmt { RB_R8_UINT = 14, RB_R8_SINT = 15, RB_R10G10B10A2_UNORM = 16, + RB_A2R10G10B10_UNORM = 17, + RB_R10G10B10A2_UINT = 18, + RB_A2R10G10B10_UINT = 19, RB_A8_UNORM = 20, RB_R8_UNORM = 21, RB_R16_FLOAT = 24, @@ -244,30 +256,273 @@ enum a3xx_color_fmt { RB_R32G32B32A32_UINT = 59, }; +enum a3xx_cp_perfcounter_select { + CP_ALWAYS_COUNT = 0, + CP_AHB_PFPTRANS_WAIT = 3, + CP_AHB_NRTTRANS_WAIT = 6, + CP_CSF_NRT_READ_WAIT = 8, + CP_CSF_I1_FIFO_FULL = 9, + CP_CSF_I2_FIFO_FULL = 10, + CP_CSF_ST_FIFO_FULL = 11, + CP_RESERVED_12 = 12, + CP_CSF_RING_ROQ_FULL = 13, + CP_CSF_I1_ROQ_FULL = 14, + CP_CSF_I2_ROQ_FULL = 15, + CP_CSF_ST_ROQ_FULL = 16, + CP_RESERVED_17 = 17, + CP_MIU_TAG_MEM_FULL = 18, + CP_MIU_NRT_WRITE_STALLED = 22, + CP_MIU_NRT_READ_STALLED = 23, + CP_ME_REGS_RB_DONE_FIFO_FULL = 26, + CP_ME_REGS_VS_EVENT_FIFO_FULL = 27, + CP_ME_REGS_PS_EVENT_FIFO_FULL = 28, + CP_ME_REGS_CF_EVENT_FIFO_FULL = 29, + CP_ME_MICRO_RB_STARVED = 30, + CP_AHB_RBBM_DWORD_SENT = 40, + CP_ME_BUSY_CLOCKS = 41, + CP_ME_WAIT_CONTEXT_AVAIL = 42, + CP_PFP_TYPE0_PACKET = 43, + CP_PFP_TYPE3_PACKET = 44, + CP_CSF_RB_WPTR_NEQ_RPTR = 45, + CP_CSF_I1_SIZE_NEQ_ZERO = 46, + CP_CSF_I2_SIZE_NEQ_ZERO = 47, + CP_CSF_RBI1I2_FETCHING = 48, +}; + +enum a3xx_gras_tse_perfcounter_select { + GRAS_TSEPERF_INPUT_PRIM = 0, + GRAS_TSEPERF_INPUT_NULL_PRIM = 1, + GRAS_TSEPERF_TRIVAL_REJ_PRIM = 2, + GRAS_TSEPERF_CLIPPED_PRIM = 3, + 
GRAS_TSEPERF_NEW_PRIM = 4, + GRAS_TSEPERF_ZERO_AREA_PRIM = 5, + GRAS_TSEPERF_FACENESS_CULLED_PRIM = 6, + GRAS_TSEPERF_ZERO_PIXEL_PRIM = 7, + GRAS_TSEPERF_OUTPUT_NULL_PRIM = 8, + GRAS_TSEPERF_OUTPUT_VISIBLE_PRIM = 9, + GRAS_TSEPERF_PRE_CLIP_PRIM = 10, + GRAS_TSEPERF_POST_CLIP_PRIM = 11, + GRAS_TSEPERF_WORKING_CYCLES = 12, + GRAS_TSEPERF_PC_STARVE = 13, + GRAS_TSERASPERF_STALL = 14, +}; + +enum a3xx_gras_ras_perfcounter_select { + GRAS_RASPERF_16X16_TILES = 0, + GRAS_RASPERF_8X8_TILES = 1, + GRAS_RASPERF_4X4_TILES = 2, + GRAS_RASPERF_WORKING_CYCLES = 3, + GRAS_RASPERF_STALL_CYCLES_BY_RB = 4, + GRAS_RASPERF_STALL_CYCLES_BY_VSC = 5, + GRAS_RASPERF_STARVE_CYCLES_BY_TSE = 6, +}; + +enum a3xx_hlsq_perfcounter_select { + HLSQ_PERF_SP_VS_CONSTANT = 0, + HLSQ_PERF_SP_VS_INSTRUCTIONS = 1, + HLSQ_PERF_SP_FS_CONSTANT = 2, + HLSQ_PERF_SP_FS_INSTRUCTIONS = 3, + HLSQ_PERF_TP_STATE = 4, + HLSQ_PERF_QUADS = 5, + HLSQ_PERF_PIXELS = 6, + HLSQ_PERF_VERTICES = 7, + HLSQ_PERF_FS8_THREADS = 8, + HLSQ_PERF_FS16_THREADS = 9, + HLSQ_PERF_FS32_THREADS = 10, + HLSQ_PERF_VS8_THREADS = 11, + HLSQ_PERF_VS16_THREADS = 12, + HLSQ_PERF_SP_VS_DATA_BYTES = 13, + HLSQ_PERF_SP_FS_DATA_BYTES = 14, + HLSQ_PERF_ACTIVE_CYCLES = 15, + HLSQ_PERF_STALL_CYCLES_SP_STATE = 16, + HLSQ_PERF_STALL_CYCLES_SP_VS = 17, + HLSQ_PERF_STALL_CYCLES_SP_FS = 18, + HLSQ_PERF_STALL_CYCLES_UCHE = 19, + HLSQ_PERF_RBBM_LOAD_CYCLES = 20, + HLSQ_PERF_DI_TO_VS_START_SP0 = 21, + HLSQ_PERF_DI_TO_FS_START_SP0 = 22, + HLSQ_PERF_VS_START_TO_DONE_SP0 = 23, + HLSQ_PERF_FS_START_TO_DONE_SP0 = 24, + HLSQ_PERF_SP_STATE_COPY_CYCLES_VS = 25, + HLSQ_PERF_SP_STATE_COPY_CYCLES_FS = 26, + HLSQ_PERF_UCHE_LATENCY_CYCLES = 27, + HLSQ_PERF_UCHE_LATENCY_COUNT = 28, +}; + +enum a3xx_pc_perfcounter_select { + PC_PCPERF_VISIBILITY_STREAMS = 0, + PC_PCPERF_TOTAL_INSTANCES = 1, + PC_PCPERF_PRIMITIVES_PC_VPC = 2, + PC_PCPERF_PRIMITIVES_KILLED_BY_VS = 3, + PC_PCPERF_PRIMITIVES_VISIBLE_BY_VS = 4, + PC_PCPERF_DRAWCALLS_KILLED_BY_VS = 5, + PC_PCPERF_DRAWCALLS_VISIBLE_BY_VS = 6, + PC_PCPERF_VERTICES_TO_VFD = 7, + PC_PCPERF_REUSED_VERTICES = 8, + PC_PCPERF_CYCLES_STALLED_BY_VFD = 9, + PC_PCPERF_CYCLES_STALLED_BY_TSE = 10, + PC_PCPERF_CYCLES_STALLED_BY_VBIF = 11, + PC_PCPERF_CYCLES_IS_WORKING = 12, +}; + +enum a3xx_rb_perfcounter_select { + RB_RBPERF_ACTIVE_CYCLES_ANY = 0, + RB_RBPERF_ACTIVE_CYCLES_ALL = 1, + RB_RBPERF_STARVE_CYCLES_BY_SP = 2, + RB_RBPERF_STARVE_CYCLES_BY_RAS = 3, + RB_RBPERF_STARVE_CYCLES_BY_MARB = 4, + RB_RBPERF_STALL_CYCLES_BY_MARB = 5, + RB_RBPERF_STALL_CYCLES_BY_HLSQ = 6, + RB_RBPERF_RB_MARB_DATA = 7, + RB_RBPERF_SP_RB_QUAD = 8, + RB_RBPERF_RAS_EARLY_Z_QUADS = 9, + RB_RBPERF_GMEM_CH0_READ = 10, + RB_RBPERF_GMEM_CH1_READ = 11, + RB_RBPERF_GMEM_CH0_WRITE = 12, + RB_RBPERF_GMEM_CH1_WRITE = 13, + RB_RBPERF_CP_CONTEXT_DONE = 14, + RB_RBPERF_CP_CACHE_FLUSH = 15, + RB_RBPERF_CP_ZPASS_DONE = 16, +}; + +enum a3xx_rbbm_perfcounter_select { + RBBM_ALAWYS_ON = 0, + RBBM_VBIF_BUSY = 1, + RBBM_TSE_BUSY = 2, + RBBM_RAS_BUSY = 3, + RBBM_PC_DCALL_BUSY = 4, + RBBM_PC_VSD_BUSY = 5, + RBBM_VFD_BUSY = 6, + RBBM_VPC_BUSY = 7, + RBBM_UCHE_BUSY = 8, + RBBM_VSC_BUSY = 9, + RBBM_HLSQ_BUSY = 10, + RBBM_ANY_RB_BUSY = 11, + RBBM_ANY_TEX_BUSY = 12, + RBBM_ANY_USP_BUSY = 13, + RBBM_ANY_MARB_BUSY = 14, + RBBM_ANY_ARB_BUSY = 15, + RBBM_AHB_STATUS_BUSY = 16, + RBBM_AHB_STATUS_STALLED = 17, + RBBM_AHB_STATUS_TXFR = 18, + RBBM_AHB_STATUS_TXFR_SPLIT = 19, + RBBM_AHB_STATUS_TXFR_ERROR = 20, + RBBM_AHB_STATUS_LONG_STALL = 21, + RBBM_RBBM_STATUS_MASKED = 22, +}; + enum a3xx_sp_perfcounter_select { + 
SP_LM_LOAD_INSTRUCTIONS = 0, + SP_LM_STORE_INSTRUCTIONS = 1, + SP_LM_ATOMICS = 2, + SP_UCHE_LOAD_INSTRUCTIONS = 3, + SP_UCHE_STORE_INSTRUCTIONS = 4, + SP_UCHE_ATOMICS = 5, + SP_VS_TEX_INSTRUCTIONS = 6, + SP_VS_CFLOW_INSTRUCTIONS = 7, + SP_VS_EFU_INSTRUCTIONS = 8, + SP_VS_FULL_ALU_INSTRUCTIONS = 9, + SP_VS_HALF_ALU_INSTRUCTIONS = 10, + SP_FS_TEX_INSTRUCTIONS = 11, SP_FS_CFLOW_INSTRUCTIONS = 12, + SP_FS_EFU_INSTRUCTIONS = 13, SP_FS_FULL_ALU_INSTRUCTIONS = 14, - SP0_ICL1_MISSES = 26, + SP_FS_HALF_ALU_INSTRUCTIONS = 15, + SP_FS_BARY_INSTRUCTIONS = 16, + SP_VS_INSTRUCTIONS = 17, + SP_FS_INSTRUCTIONS = 18, + SP_ADDR_LOCK_COUNT = 19, + SP_UCHE_READ_TRANS = 20, + SP_UCHE_WRITE_TRANS = 21, + SP_EXPORT_VPC_TRANS = 22, + SP_EXPORT_RB_TRANS = 23, + SP_PIXELS_KILLED = 24, + SP_ICL1_REQUESTS = 25, + SP_ICL1_MISSES = 26, + SP_ICL0_REQUESTS = 27, + SP_ICL0_MISSES = 28, SP_ALU_ACTIVE_CYCLES = 29, + SP_EFU_ACTIVE_CYCLES = 30, + SP_STALL_CYCLES_BY_VPC = 31, + SP_STALL_CYCLES_BY_TP = 32, + SP_STALL_CYCLES_BY_UCHE = 33, + SP_STALL_CYCLES_BY_RB = 34, + SP_ACTIVE_CYCLES_ANY = 35, + SP_ACTIVE_CYCLES_ALL = 36, +}; + +enum a3xx_tp_perfcounter_select { + TPL1_TPPERF_L1_REQUESTS = 0, + TPL1_TPPERF_TP0_L1_REQUESTS = 1, + TPL1_TPPERF_TP0_L1_MISSES = 2, + TPL1_TPPERF_TP1_L1_REQUESTS = 3, + TPL1_TPPERF_TP1_L1_MISSES = 4, + TPL1_TPPERF_TP2_L1_REQUESTS = 5, + TPL1_TPPERF_TP2_L1_MISSES = 6, + TPL1_TPPERF_TP3_L1_REQUESTS = 7, + TPL1_TPPERF_TP3_L1_MISSES = 8, + TPL1_TPPERF_OUTPUT_TEXELS_POINT = 9, + TPL1_TPPERF_OUTPUT_TEXELS_BILINEAR = 10, + TPL1_TPPERF_OUTPUT_TEXELS_MIP = 11, + TPL1_TPPERF_OUTPUT_TEXELS_ANISO = 12, + TPL1_TPPERF_BILINEAR_OPS = 13, + TPL1_TPPERF_QUADSQUADS_OFFSET = 14, + TPL1_TPPERF_QUADQUADS_SHADOW = 15, + TPL1_TPPERF_QUADS_ARRAY = 16, + TPL1_TPPERF_QUADS_PROJECTION = 17, + TPL1_TPPERF_QUADS_GRADIENT = 18, + TPL1_TPPERF_QUADS_1D2D = 19, + TPL1_TPPERF_QUADS_3DCUBE = 20, + TPL1_TPPERF_ZERO_LOD = 21, + TPL1_TPPERF_OUTPUT_TEXELS = 22, + TPL1_TPPERF_ACTIVE_CYCLES_ANY = 23, + TPL1_TPPERF_ACTIVE_CYCLES_ALL = 24, + TPL1_TPPERF_STALL_CYCLES_BY_ARB = 25, + TPL1_TPPERF_LATENCY = 26, + TPL1_TPPERF_LATENCY_TRANS = 27, }; -enum a3xx_rop_code { - ROP_CLEAR = 0, - ROP_NOR = 1, - ROP_AND_INVERTED = 2, - ROP_COPY_INVERTED = 3, - ROP_AND_REVERSE = 4, - ROP_INVERT = 5, - ROP_XOR = 6, - ROP_NAND = 7, - ROP_AND = 8, - ROP_EQUIV = 9, - ROP_NOOP = 10, - ROP_OR_INVERTED = 11, - ROP_COPY = 12, - ROP_OR_REVERSE = 13, - ROP_OR = 14, - ROP_SET = 15, +enum a3xx_vfd_perfcounter_select { + VFD_PERF_UCHE_BYTE_FETCHED = 0, + VFD_PERF_UCHE_TRANS = 1, + VFD_PERF_VPC_BYPASS_COMPONENTS = 2, + VFD_PERF_FETCH_INSTRUCTIONS = 3, + VFD_PERF_DECODE_INSTRUCTIONS = 4, + VFD_PERF_ACTIVE_CYCLES = 5, + VFD_PERF_STALL_CYCLES_UCHE = 6, + VFD_PERF_STALL_CYCLES_HLSQ = 7, + VFD_PERF_STALL_CYCLES_VPC_BYPASS = 8, + VFD_PERF_STALL_CYCLES_VPC_ALLOC = 9, +}; + +enum a3xx_vpc_perfcounter_select { + VPC_PERF_SP_LM_PRIMITIVES = 0, + VPC_PERF_COMPONENTS_FROM_SP = 1, + VPC_PERF_SP_LM_COMPONENTS = 2, + VPC_PERF_ACTIVE_CYCLES = 3, + VPC_PERF_STALL_CYCLES_LM = 4, + VPC_PERF_STALL_CYCLES_RAS = 5, +}; + +enum a3xx_uche_perfcounter_select { + UCHE_UCHEPERF_VBIF_READ_BEATS_TP = 0, + UCHE_UCHEPERF_VBIF_READ_BEATS_VFD = 1, + UCHE_UCHEPERF_VBIF_READ_BEATS_HLSQ = 2, + UCHE_UCHEPERF_VBIF_READ_BEATS_MARB = 3, + UCHE_UCHEPERF_VBIF_READ_BEATS_SP = 4, + UCHE_UCHEPERF_READ_REQUESTS_TP = 8, + UCHE_UCHEPERF_READ_REQUESTS_VFD = 9, + UCHE_UCHEPERF_READ_REQUESTS_HLSQ = 10, + UCHE_UCHEPERF_READ_REQUESTS_MARB = 11, + UCHE_UCHEPERF_READ_REQUESTS_SP = 12, + UCHE_UCHEPERF_WRITE_REQUESTS_MARB = 13, 
+ UCHE_UCHEPERF_WRITE_REQUESTS_SP = 14, + UCHE_UCHEPERF_TAG_CHECK_FAILS = 15, + UCHE_UCHEPERF_EVICTS = 16, + UCHE_UCHEPERF_FLUSHES = 17, + UCHE_UCHEPERF_VBIF_LATENCY_CYCLES = 18, + UCHE_UCHEPERF_VBIF_LATENCY_SAMPLES = 19, + UCHE_UCHEPERF_ACTIVE_CYCLES = 20, }; enum a3xx_rb_blend_opcode { @@ -1429,15 +1684,23 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_ #define REG_A3XX_PC_RESTART_INDEX 0x000021ed #define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200 -#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010 +#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000030 #define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4 static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val) { return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK; } #define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040 +#define A3XX_HLSQ_CONTROL_0_REG_COMPUTEMODE 0x00000100 #define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200 #define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400 +#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK 0x00fff000 +#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT 12 +static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK; +} +#define A3XX_HLSQ_CONTROL_0_REG_FSONLYTEX 0x02000000 #define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000 #define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000 #define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27 @@ -1451,17 +1714,39 @@ static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val) #define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000 #define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201 -#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040 +#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x000000c0 #define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6 static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val) { return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK; } #define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100 -#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200 -#define A3XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000 +#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK 0x00ff0000 +#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT 16 +static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK; +} +#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK 0xff000000 +#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT 24 +static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK; +} #define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202 +#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK 0x000003fc +#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT 2 +static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK; +} +#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK 0x03fc0000 +#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT 18 
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK; +} #define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000 #define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26 static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val) @@ -1478,13 +1763,13 @@ static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val) } #define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204 -#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff +#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff #define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0 static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val) { return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK; } -#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000 +#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000 #define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12 static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val) { @@ -1498,13 +1783,13 @@ static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val) } #define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205 -#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff +#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff #define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0 static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val) { return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK; } -#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000 +#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000 #define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12 static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val) { @@ -1518,13 +1803,13 @@ static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val) } #define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206 -#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff +#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff #define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0 static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val) { return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK; } -#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000 +#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000 #define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16 static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val) { @@ -1532,13 +1817,13 @@ static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val) } #define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207 -#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff +#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff #define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0 static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val) { return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK; } -#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000 +#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000 #define 
A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16 static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val) { @@ -1620,12 +1905,24 @@ static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val) } #define REG_A3XX_VFD_CONTROL_1 0x00002241 -#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff +#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000000f #define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0 static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val) { return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK; } +#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK 0x000000f0 +#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT 4 +static inline uint32_t A3XX_VFD_CONTROL_1_MAXTHRESHOLD(uint32_t val) +{ + return ((val) << A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK; +} +#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK 0x00000f00 +#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT 8 +static inline uint32_t A3XX_VFD_CONTROL_1_MINTHRESHOLD(uint32_t val) +{ + return ((val) << A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK; +} #define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000 #define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16 static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val) @@ -2008,24 +2305,19 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffe return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK; } #define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004 +#define A3XX_SP_VS_CTRL_REG0_ALUSCHMODE 0x00000008 #define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 #define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) { return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK; } -#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00 +#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 #define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) { return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK; } -#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000 -#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18 -static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val) -{ - return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK; -} #define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000 #define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20 static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) @@ -2033,8 +2325,6 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK; } #define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000 -#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000 -#define A3XX_SP_VS_CTRL_REG0_COMPUTEMODE 0x00800000 #define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000 #define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24 static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val) @@ -2075,7 +2365,8 @@ static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val) { return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK; 
} -#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000 +#define A3XX_SP_VS_PARAM_REG_POS2DMODE 0x00010000 +#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0x01f00000 #define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20 static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val) { @@ -2085,24 +2376,26 @@ static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val) static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; } static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; } -#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff +#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff #define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0 static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val) { return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK; } +#define A3XX_SP_VS_OUT_REG_A_HALF 0x00000100 #define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00 #define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9 static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val) { return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK; } -#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000 +#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000 #define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16 static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val) { return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK; } +#define A3XX_SP_VS_OUT_REG_B_HALF 0x01000000 #define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000 #define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25 static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val) @@ -2113,25 +2406,25 @@ static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val) static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; } static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; } -#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff +#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x0000007f #define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0 static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val) { return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK; } -#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 +#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x00007f00 #define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8 static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val) { return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK; } -#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 +#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x007f0000 #define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16 static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val) { return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK; } -#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 +#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0x7f000000 #define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24 static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val) { @@ -2139,6 +2432,12 @@ static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val) } #define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4 +#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff +#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0 +static inline uint32_t 
A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val) +{ + return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK; +} #define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 #define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val) @@ -2155,8 +2454,38 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) #define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5 #define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6 +#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff +#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0 +static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val) +{ + return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK; +} +#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00 +#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8 +static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val) +{ + return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK; +} +#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000 +#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24 +static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val) +{ + return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK; +} #define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7 +#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f +#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0 +static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val) +{ + return ((val) << A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK; +} +#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0 +#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5 +static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val) +{ + return ((val >> 5) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK; +} #define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8 @@ -2182,24 +2511,22 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffe return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK; } #define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004 +#define A3XX_SP_FS_CTRL_REG0_ALUSCHMODE 0x00000008 #define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 #define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) { return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK; } -#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00 +#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 #define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) { return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK; } -#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000 -#define 
A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18 -static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val) -{ - return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK; -} +#define A3XX_SP_FS_CTRL_REG0_FSBYPASSENABLE 0x00020000 +#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP 0x00040000 +#define A3XX_SP_FS_CTRL_REG0_OUTORDERED 0x00080000 #define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000 #define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20 static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) @@ -2235,7 +2562,7 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val) { return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK; } -#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x3f000000 +#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x7f000000 #define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24 static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val) { @@ -2243,6 +2570,12 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val) } #define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2 +#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff +#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0 +static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val) +{ + return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK; +} #define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 #define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val) @@ -2259,8 +2592,38 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) #define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3 #define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4 +#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff +#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0 +static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val) +{ + return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK; +} +#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00 +#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8 +static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val) +{ + return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK; +} +#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000 +#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24 +static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val) +{ + return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK; +} #define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5 +#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f +#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0 +static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val) +{ + return ((val) << A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK; +} +#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0 +#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5 
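/*
 * Editor's illustrative sketch (not part of the patch): the generated
 * __MASK/__SHIFT pairs and their static inline packers in this header are
 * meant to be OR'ed together when composing a register value.  Note that
 * the *_SHADERSTARTADDRESS() helpers drop the low five bits of the value
 * ((val >> 5) with __SHIFT 5), i.e. the private-memory base address is
 * assumed to be 32-byte aligned.  The helper below, its name and its
 * burst-length value are made up for illustration only; gpu_write() is the
 * register accessor the msm driver uses elsewhere in this patch.
 */
static void example_program_fs_pvt_mem(struct msm_gpu *gpu, uint32_t gpuaddr)
{
	uint32_t val;

	/* hypothetical burst length; the field is 5 bits wide (mask 0x1f) */
	val = A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(0x10) |
	      A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(gpuaddr);

	gpu_write(gpu, REG_A3XX_SP_FS_PVT_MEM_ADDR_REG, val);
}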
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val) +{ + return ((val >> 5) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK; +} #define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6 diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h index 99de8271dba8..3220b91f559a 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h @@ -9,16 +9,17 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48) - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) -Copyright (C) 2013-2015 by the following authors: +Copyright (C) 2013-2016 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the @@ -47,11 +48,13 @@ enum a4xx_color_fmt { RB4_R8_UNORM = 2, RB4_R4G4B4A4_UNORM = 8, RB4_R5G5B5A1_UNORM = 10, - RB4_R5G6R5_UNORM = 14, + RB4_R5G6B5_UNORM = 14, RB4_R8G8_UNORM = 15, RB4_R8G8_SNORM = 16, RB4_R8G8_UINT = 17, RB4_R8G8_SINT = 18, + RB4_R16_UNORM = 19, + RB4_R16_SNORM = 20, RB4_R16_FLOAT = 21, RB4_R16_UINT = 22, RB4_R16_SINT = 23, @@ -63,12 +66,16 @@ enum a4xx_color_fmt { RB4_R10G10B10A2_UNORM = 31, RB4_R10G10B10A2_UINT = 34, RB4_R11G11B10_FLOAT = 39, + RB4_R16G16_UNORM = 40, + RB4_R16G16_SNORM = 41, RB4_R16G16_FLOAT = 42, RB4_R16G16_UINT = 43, RB4_R16G16_SINT = 44, RB4_R32_FLOAT = 45, RB4_R32_UINT = 46, RB4_R32_SINT = 47, + RB4_R16G16B16A16_UNORM = 52, + RB4_R16G16B16A16_SNORM = 53, RB4_R16G16B16A16_FLOAT = 54, RB4_R16G16B16A16_UINT = 55, RB4_R16G16B16A16_SINT = 56, @@ -106,6 +113,7 @@ enum a4xx_vtx_fmt { VFMT4_32_32_FIXED = 10, VFMT4_32_32_32_FIXED = 11, VFMT4_32_32_32_32_FIXED = 12, + VFMT4_11_11_10_FLOAT = 13, VFMT4_16_SINT = 16, VFMT4_16_16_SINT = 17, VFMT4_16_16_16_SINT = 18, @@ -146,52 +154,76 @@ enum a4xx_vtx_fmt { VFMT4_8_8_SNORM = 53, VFMT4_8_8_8_SNORM = 54, VFMT4_8_8_8_8_SNORM = 55, - VFMT4_10_10_10_2_UINT = 60, - VFMT4_10_10_10_2_UNORM = 61, - 
VFMT4_10_10_10_2_SINT = 62, - VFMT4_10_10_10_2_SNORM = 63, + VFMT4_10_10_10_2_UINT = 56, + VFMT4_10_10_10_2_UNORM = 57, + VFMT4_10_10_10_2_SINT = 58, + VFMT4_10_10_10_2_SNORM = 59, + VFMT4_2_10_10_10_UINT = 60, + VFMT4_2_10_10_10_UNORM = 61, + VFMT4_2_10_10_10_SINT = 62, + VFMT4_2_10_10_10_SNORM = 63, }; enum a4xx_tex_fmt { - TFMT4_5_6_5_UNORM = 11, - TFMT4_5_5_5_1_UNORM = 10, - TFMT4_4_4_4_4_UNORM = 8, - TFMT4_X8Z24_UNORM = 71, - TFMT4_10_10_10_2_UNORM = 33, TFMT4_A8_UNORM = 3, - TFMT4_L8_A8_UNORM = 13, TFMT4_8_UNORM = 4, - TFMT4_8_8_UNORM = 14, - TFMT4_8_8_8_8_UNORM = 28, TFMT4_8_SNORM = 5, - TFMT4_8_8_SNORM = 15, - TFMT4_8_8_8_8_SNORM = 29, TFMT4_8_UINT = 6, - TFMT4_8_8_UINT = 16, - TFMT4_8_8_8_8_UINT = 30, TFMT4_8_SINT = 7, + TFMT4_4_4_4_4_UNORM = 8, + TFMT4_5_5_5_1_UNORM = 9, + TFMT4_5_6_5_UNORM = 11, + TFMT4_L8_A8_UNORM = 13, + TFMT4_8_8_UNORM = 14, + TFMT4_8_8_SNORM = 15, + TFMT4_8_8_UINT = 16, TFMT4_8_8_SINT = 17, - TFMT4_8_8_8_8_SINT = 31, + TFMT4_16_UNORM = 18, + TFMT4_16_SNORM = 19, + TFMT4_16_FLOAT = 20, TFMT4_16_UINT = 21, - TFMT4_16_16_UINT = 41, - TFMT4_16_16_16_16_UINT = 54, TFMT4_16_SINT = 22, + TFMT4_8_8_8_8_UNORM = 28, + TFMT4_8_8_8_8_SNORM = 29, + TFMT4_8_8_8_8_UINT = 30, + TFMT4_8_8_8_8_SINT = 31, + TFMT4_9_9_9_E5_FLOAT = 32, + TFMT4_10_10_10_2_UNORM = 33, + TFMT4_10_10_10_2_UINT = 34, + TFMT4_11_11_10_FLOAT = 37, + TFMT4_16_16_UNORM = 38, + TFMT4_16_16_SNORM = 39, + TFMT4_16_16_FLOAT = 40, + TFMT4_16_16_UINT = 41, TFMT4_16_16_SINT = 42, - TFMT4_16_16_16_16_SINT = 55, + TFMT4_32_FLOAT = 43, TFMT4_32_UINT = 44, - TFMT4_32_32_UINT = 57, - TFMT4_32_32_32_32_UINT = 64, TFMT4_32_SINT = 45, - TFMT4_32_32_SINT = 58, - TFMT4_32_32_32_32_SINT = 65, - TFMT4_16_FLOAT = 20, - TFMT4_16_16_FLOAT = 40, + TFMT4_16_16_16_16_UNORM = 51, + TFMT4_16_16_16_16_SNORM = 52, TFMT4_16_16_16_16_FLOAT = 53, - TFMT4_32_FLOAT = 43, + TFMT4_16_16_16_16_UINT = 54, + TFMT4_16_16_16_16_SINT = 55, TFMT4_32_32_FLOAT = 56, + TFMT4_32_32_UINT = 57, + TFMT4_32_32_SINT = 58, + TFMT4_32_32_32_FLOAT = 59, + TFMT4_32_32_32_UINT = 60, + TFMT4_32_32_32_SINT = 61, TFMT4_32_32_32_32_FLOAT = 63, - TFMT4_9_9_9_E5_FLOAT = 32, - TFMT4_11_11_10_FLOAT = 37, + TFMT4_32_32_32_32_UINT = 64, + TFMT4_32_32_32_32_SINT = 65, + TFMT4_X8Z24_UNORM = 71, + TFMT4_DXT1 = 86, + TFMT4_DXT3 = 87, + TFMT4_DXT5 = 88, + TFMT4_RGTC1_UNORM = 90, + TFMT4_RGTC1_SNORM = 91, + TFMT4_RGTC2_UNORM = 94, + TFMT4_RGTC2_SNORM = 95, + TFMT4_BPTC_UFLOAT = 97, + TFMT4_BPTC_FLOAT = 98, + TFMT4_BPTC = 99, TFMT4_ATC_RGB = 100, TFMT4_ATC_RGBA_EXPLICIT = 101, TFMT4_ATC_RGBA_INTERPOLATED = 102, @@ -240,6 +272,545 @@ enum a4xx_tess_spacing { EVEN_SPACING = 3, }; +enum a4xx_ccu_perfcounter_select { + CCU_BUSY_CYCLES = 0, + CCU_RB_DEPTH_RETURN_STALL = 2, + CCU_RB_COLOR_RETURN_STALL = 3, + CCU_DEPTH_BLOCKS = 6, + CCU_COLOR_BLOCKS = 7, + CCU_DEPTH_BLOCK_HIT = 8, + CCU_COLOR_BLOCK_HIT = 9, + CCU_DEPTH_FLAG1_COUNT = 10, + CCU_DEPTH_FLAG2_COUNT = 11, + CCU_DEPTH_FLAG3_COUNT = 12, + CCU_DEPTH_FLAG4_COUNT = 13, + CCU_COLOR_FLAG1_COUNT = 14, + CCU_COLOR_FLAG2_COUNT = 15, + CCU_COLOR_FLAG3_COUNT = 16, + CCU_COLOR_FLAG4_COUNT = 17, + CCU_PARTIAL_BLOCK_READ = 18, +}; + +enum a4xx_cp_perfcounter_select { + CP_ALWAYS_COUNT = 0, + CP_BUSY = 1, + CP_PFP_IDLE = 2, + CP_PFP_BUSY_WORKING = 3, + CP_PFP_STALL_CYCLES_ANY = 4, + CP_PFP_STARVE_CYCLES_ANY = 5, + CP_PFP_STARVED_PER_LOAD_ADDR = 6, + CP_PFP_STALLED_PER_STORE_ADDR = 7, + CP_PFP_PC_PROFILE = 8, + CP_PFP_MATCH_PM4_PKT_PROFILE = 9, + CP_PFP_COND_INDIRECT_DISCARDED = 10, + CP_LONG_RESUMPTIONS = 11, + CP_RESUME_CYCLES = 12, + 
CP_RESUME_TO_BOUNDARY_CYCLES = 13, + CP_LONG_PREEMPTIONS = 14, + CP_PREEMPT_CYCLES = 15, + CP_PREEMPT_TO_BOUNDARY_CYCLES = 16, + CP_ME_FIFO_EMPTY_PFP_IDLE = 17, + CP_ME_FIFO_EMPTY_PFP_BUSY = 18, + CP_ME_FIFO_NOT_EMPTY_NOT_FULL = 19, + CP_ME_FIFO_FULL_ME_BUSY = 20, + CP_ME_FIFO_FULL_ME_NON_WORKING = 21, + CP_ME_WAITING_FOR_PACKETS = 22, + CP_ME_BUSY_WORKING = 23, + CP_ME_STARVE_CYCLES_ANY = 24, + CP_ME_STARVE_CYCLES_PER_PROFILE = 25, + CP_ME_STALL_CYCLES_PER_PROFILE = 26, + CP_ME_PC_PROFILE = 27, + CP_RCIU_FIFO_EMPTY = 28, + CP_RCIU_FIFO_NOT_EMPTY_NOT_FULL = 29, + CP_RCIU_FIFO_FULL = 30, + CP_RCIU_FIFO_FULL_NO_CONTEXT = 31, + CP_RCIU_FIFO_FULL_AHB_MASTER = 32, + CP_RCIU_FIFO_FULL_OTHER = 33, + CP_AHB_IDLE = 34, + CP_AHB_STALL_ON_GRANT_NO_SPLIT = 35, + CP_AHB_STALL_ON_GRANT_SPLIT = 36, + CP_AHB_STALL_ON_GRANT_SPLIT_PROFILE = 37, + CP_AHB_BUSY_WORKING = 38, + CP_AHB_BUSY_STALL_ON_HRDY = 39, + CP_AHB_BUSY_STALL_ON_HRDY_PROFILE = 40, +}; + +enum a4xx_gras_ras_perfcounter_select { + RAS_SUPER_TILES = 0, + RAS_8X8_TILES = 1, + RAS_4X4_TILES = 2, + RAS_BUSY_CYCLES = 3, + RAS_STALL_CYCLES_BY_RB = 4, + RAS_STALL_CYCLES_BY_VSC = 5, + RAS_STARVE_CYCLES_BY_TSE = 6, + RAS_SUPERTILE_CYCLES = 7, + RAS_TILE_CYCLES = 8, + RAS_FULLY_COVERED_SUPER_TILES = 9, + RAS_FULLY_COVERED_8X8_TILES = 10, + RAS_4X4_PRIM = 11, + RAS_8X4_4X8_PRIM = 12, + RAS_8X8_PRIM = 13, +}; + +enum a4xx_gras_tse_perfcounter_select { + TSE_INPUT_PRIM = 0, + TSE_INPUT_NULL_PRIM = 1, + TSE_TRIVAL_REJ_PRIM = 2, + TSE_CLIPPED_PRIM = 3, + TSE_NEW_PRIM = 4, + TSE_ZERO_AREA_PRIM = 5, + TSE_FACENESS_CULLED_PRIM = 6, + TSE_ZERO_PIXEL_PRIM = 7, + TSE_OUTPUT_NULL_PRIM = 8, + TSE_OUTPUT_VISIBLE_PRIM = 9, + TSE_PRE_CLIP_PRIM = 10, + TSE_POST_CLIP_PRIM = 11, + TSE_BUSY_CYCLES = 12, + TSE_PC_STARVE = 13, + TSE_RAS_STALL = 14, + TSE_STALL_BARYPLANE_FIFO_FULL = 15, + TSE_STALL_ZPLANE_FIFO_FULL = 16, +}; + +enum a4xx_hlsq_perfcounter_select { + HLSQ_SP_VS_STAGE_CONSTANT = 0, + HLSQ_SP_VS_STAGE_INSTRUCTIONS = 1, + HLSQ_SP_FS_STAGE_CONSTANT = 2, + HLSQ_SP_FS_STAGE_INSTRUCTIONS = 3, + HLSQ_TP_STATE = 4, + HLSQ_QUADS = 5, + HLSQ_PIXELS = 6, + HLSQ_VERTICES = 7, + HLSQ_SP_VS_STAGE_DATA_BYTES = 13, + HLSQ_SP_FS_STAGE_DATA_BYTES = 14, + HLSQ_BUSY_CYCLES = 15, + HLSQ_STALL_CYCLES_SP_STATE = 16, + HLSQ_STALL_CYCLES_SP_VS_STAGE = 17, + HLSQ_STALL_CYCLES_SP_FS_STAGE = 18, + HLSQ_STALL_CYCLES_UCHE = 19, + HLSQ_RBBM_LOAD_CYCLES = 20, + HLSQ_DI_TO_VS_START_SP = 21, + HLSQ_DI_TO_FS_START_SP = 22, + HLSQ_VS_STAGE_START_TO_DONE_SP = 23, + HLSQ_FS_STAGE_START_TO_DONE_SP = 24, + HLSQ_SP_STATE_COPY_CYCLES_VS_STAGE = 25, + HLSQ_SP_STATE_COPY_CYCLES_FS_STAGE = 26, + HLSQ_UCHE_LATENCY_CYCLES = 27, + HLSQ_UCHE_LATENCY_COUNT = 28, + HLSQ_STARVE_CYCLES_VFD = 29, +}; + +enum a4xx_pc_perfcounter_select { + PC_VIS_STREAMS_LOADED = 0, + PC_VPC_PRIMITIVES = 2, + PC_DEAD_PRIM = 3, + PC_LIVE_PRIM = 4, + PC_DEAD_DRAWCALLS = 5, + PC_LIVE_DRAWCALLS = 6, + PC_VERTEX_MISSES = 7, + PC_STALL_CYCLES_VFD = 9, + PC_STALL_CYCLES_TSE = 10, + PC_STALL_CYCLES_UCHE = 11, + PC_WORKING_CYCLES = 12, + PC_IA_VERTICES = 13, + PC_GS_PRIMITIVES = 14, + PC_HS_INVOCATIONS = 15, + PC_DS_INVOCATIONS = 16, + PC_DS_PRIMITIVES = 17, + PC_STARVE_CYCLES_FOR_INDEX = 20, + PC_STARVE_CYCLES_FOR_TESS_FACTOR = 21, + PC_STARVE_CYCLES_FOR_VIZ_STREAM = 22, + PC_STALL_CYCLES_TESS = 23, + PC_STARVE_CYCLES_FOR_POSITION = 24, + PC_MODE0_DRAWCALL = 25, + PC_MODE1_DRAWCALL = 26, + PC_MODE2_DRAWCALL = 27, + PC_MODE3_DRAWCALL = 28, + PC_MODE4_DRAWCALL = 29, + PC_PREDICATED_DEAD_DRAWCALL = 30, + PC_STALL_CYCLES_BY_TSE_ONLY = 31, + 
PC_STALL_CYCLES_BY_VPC_ONLY = 32, + PC_VPC_POS_DATA_TRANSACTION = 33, + PC_BUSY_CYCLES = 34, + PC_STARVE_CYCLES_DI = 35, + PC_STALL_CYCLES_VPC = 36, + TESS_WORKING_CYCLES = 37, + TESS_NUM_CYCLES_SETUP_WORKING = 38, + TESS_NUM_CYCLES_PTGEN_WORKING = 39, + TESS_NUM_CYCLES_CONNGEN_WORKING = 40, + TESS_BUSY_CYCLES = 41, + TESS_STARVE_CYCLES_PC = 42, + TESS_STALL_CYCLES_PC = 43, +}; + +enum a4xx_pwr_perfcounter_select { + PWR_CORE_CLOCK_CYCLES = 0, + PWR_BUSY_CLOCK_CYCLES = 1, +}; + +enum a4xx_rb_perfcounter_select { + RB_BUSY_CYCLES = 0, + RB_BUSY_CYCLES_BINNING = 1, + RB_BUSY_CYCLES_RENDERING = 2, + RB_BUSY_CYCLES_RESOLVE = 3, + RB_STARVE_CYCLES_BY_SP = 4, + RB_STARVE_CYCLES_BY_RAS = 5, + RB_STARVE_CYCLES_BY_MARB = 6, + RB_STALL_CYCLES_BY_MARB = 7, + RB_STALL_CYCLES_BY_HLSQ = 8, + RB_RB_RB_MARB_DATA = 9, + RB_SP_RB_QUAD = 10, + RB_RAS_RB_Z_QUADS = 11, + RB_GMEM_CH0_READ = 12, + RB_GMEM_CH1_READ = 13, + RB_GMEM_CH0_WRITE = 14, + RB_GMEM_CH1_WRITE = 15, + RB_CP_CONTEXT_DONE = 16, + RB_CP_CACHE_FLUSH = 17, + RB_CP_ZPASS_DONE = 18, + RB_STALL_FIFO0_FULL = 19, + RB_STALL_FIFO1_FULL = 20, + RB_STALL_FIFO2_FULL = 21, + RB_STALL_FIFO3_FULL = 22, + RB_RB_HLSQ_TRANSACTIONS = 23, + RB_Z_READ = 24, + RB_Z_WRITE = 25, + RB_C_READ = 26, + RB_C_WRITE = 27, + RB_C_READ_LATENCY = 28, + RB_Z_READ_LATENCY = 29, + RB_STALL_BY_UCHE = 30, + RB_MARB_UCHE_TRANSACTIONS = 31, + RB_CACHE_STALL_MISS = 32, + RB_CACHE_STALL_FIFO_FULL = 33, + RB_8BIT_BLENDER_UNITS_ACTIVE = 34, + RB_16BIT_BLENDER_UNITS_ACTIVE = 35, + RB_SAMPLER_UNITS_ACTIVE = 36, + RB_TOTAL_PASS = 38, + RB_Z_PASS = 39, + RB_Z_FAIL = 40, + RB_S_FAIL = 41, + RB_POWER0 = 42, + RB_POWER1 = 43, + RB_POWER2 = 44, + RB_POWER3 = 45, + RB_POWER4 = 46, + RB_POWER5 = 47, + RB_POWER6 = 48, + RB_POWER7 = 49, +}; + +enum a4xx_rbbm_perfcounter_select { + RBBM_ALWAYS_ON = 0, + RBBM_VBIF_BUSY = 1, + RBBM_TSE_BUSY = 2, + RBBM_RAS_BUSY = 3, + RBBM_PC_DCALL_BUSY = 4, + RBBM_PC_VSD_BUSY = 5, + RBBM_VFD_BUSY = 6, + RBBM_VPC_BUSY = 7, + RBBM_UCHE_BUSY = 8, + RBBM_VSC_BUSY = 9, + RBBM_HLSQ_BUSY = 10, + RBBM_ANY_RB_BUSY = 11, + RBBM_ANY_TPL1_BUSY = 12, + RBBM_ANY_SP_BUSY = 13, + RBBM_ANY_MARB_BUSY = 14, + RBBM_ANY_ARB_BUSY = 15, + RBBM_AHB_STATUS_BUSY = 16, + RBBM_AHB_STATUS_STALLED = 17, + RBBM_AHB_STATUS_TXFR = 18, + RBBM_AHB_STATUS_TXFR_SPLIT = 19, + RBBM_AHB_STATUS_TXFR_ERROR = 20, + RBBM_AHB_STATUS_LONG_STALL = 21, + RBBM_STATUS_MASKED = 22, + RBBM_CP_BUSY_GFX_CORE_IDLE = 23, + RBBM_TESS_BUSY = 24, + RBBM_COM_BUSY = 25, + RBBM_DCOM_BUSY = 32, + RBBM_ANY_CCU_BUSY = 33, + RBBM_DPM_BUSY = 34, +}; + +enum a4xx_sp_perfcounter_select { + SP_LM_LOAD_INSTRUCTIONS = 0, + SP_LM_STORE_INSTRUCTIONS = 1, + SP_LM_ATOMICS = 2, + SP_GM_LOAD_INSTRUCTIONS = 3, + SP_GM_STORE_INSTRUCTIONS = 4, + SP_GM_ATOMICS = 5, + SP_VS_STAGE_TEX_INSTRUCTIONS = 6, + SP_VS_STAGE_CFLOW_INSTRUCTIONS = 7, + SP_VS_STAGE_EFU_INSTRUCTIONS = 8, + SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 9, + SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 10, + SP_FS_STAGE_TEX_INSTRUCTIONS = 11, + SP_FS_STAGE_CFLOW_INSTRUCTIONS = 12, + SP_FS_STAGE_EFU_INSTRUCTIONS = 13, + SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 14, + SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 15, + SP_VS_INSTRUCTIONS = 17, + SP_FS_INSTRUCTIONS = 18, + SP_ADDR_LOCK_COUNT = 19, + SP_UCHE_READ_TRANS = 20, + SP_UCHE_WRITE_TRANS = 21, + SP_EXPORT_VPC_TRANS = 22, + SP_EXPORT_RB_TRANS = 23, + SP_PIXELS_KILLED = 24, + SP_ICL1_REQUESTS = 25, + SP_ICL1_MISSES = 26, + SP_ICL0_REQUESTS = 27, + SP_ICL0_MISSES = 28, + SP_ALU_WORKING_CYCLES = 29, + SP_EFU_WORKING_CYCLES = 30, + SP_STALL_CYCLES_BY_VPC = 31, + 
SP_STALL_CYCLES_BY_TP = 32, + SP_STALL_CYCLES_BY_UCHE = 33, + SP_STALL_CYCLES_BY_RB = 34, + SP_BUSY_CYCLES = 35, + SP_HS_INSTRUCTIONS = 36, + SP_DS_INSTRUCTIONS = 37, + SP_GS_INSTRUCTIONS = 38, + SP_CS_INSTRUCTIONS = 39, + SP_SCHEDULER_NON_WORKING = 40, + SP_WAVE_CONTEXTS = 41, + SP_WAVE_CONTEXT_CYCLES = 42, + SP_POWER0 = 43, + SP_POWER1 = 44, + SP_POWER2 = 45, + SP_POWER3 = 46, + SP_POWER4 = 47, + SP_POWER5 = 48, + SP_POWER6 = 49, + SP_POWER7 = 50, + SP_POWER8 = 51, + SP_POWER9 = 52, + SP_POWER10 = 53, + SP_POWER11 = 54, + SP_POWER12 = 55, + SP_POWER13 = 56, + SP_POWER14 = 57, + SP_POWER15 = 58, +}; + +enum a4xx_tp_perfcounter_select { + TP_L1_REQUESTS = 0, + TP_L1_MISSES = 1, + TP_QUADS_OFFSET = 8, + TP_QUAD_SHADOW = 9, + TP_QUADS_ARRAY = 10, + TP_QUADS_GRADIENT = 11, + TP_QUADS_1D2D = 12, + TP_QUADS_3DCUBE = 13, + TP_BUSY_CYCLES = 16, + TP_STALL_CYCLES_BY_ARB = 17, + TP_STATE_CACHE_REQUESTS = 20, + TP_STATE_CACHE_MISSES = 21, + TP_POWER0 = 22, + TP_POWER1 = 23, + TP_POWER2 = 24, + TP_POWER3 = 25, + TP_POWER4 = 26, + TP_POWER5 = 27, + TP_POWER6 = 28, + TP_POWER7 = 29, +}; + +enum a4xx_uche_perfcounter_select { + UCHE_VBIF_READ_BEATS_TP = 0, + UCHE_VBIF_READ_BEATS_VFD = 1, + UCHE_VBIF_READ_BEATS_HLSQ = 2, + UCHE_VBIF_READ_BEATS_MARB = 3, + UCHE_VBIF_READ_BEATS_SP = 4, + UCHE_READ_REQUESTS_TP = 5, + UCHE_READ_REQUESTS_VFD = 6, + UCHE_READ_REQUESTS_HLSQ = 7, + UCHE_READ_REQUESTS_MARB = 8, + UCHE_READ_REQUESTS_SP = 9, + UCHE_WRITE_REQUESTS_MARB = 10, + UCHE_WRITE_REQUESTS_SP = 11, + UCHE_TAG_CHECK_FAILS = 12, + UCHE_EVICTS = 13, + UCHE_FLUSHES = 14, + UCHE_VBIF_LATENCY_CYCLES = 15, + UCHE_VBIF_LATENCY_SAMPLES = 16, + UCHE_BUSY_CYCLES = 17, + UCHE_VBIF_READ_BEATS_PC = 18, + UCHE_READ_REQUESTS_PC = 19, + UCHE_WRITE_REQUESTS_VPC = 20, + UCHE_STALL_BY_VBIF = 21, + UCHE_WRITE_REQUESTS_VSC = 22, + UCHE_POWER0 = 23, + UCHE_POWER1 = 24, + UCHE_POWER2 = 25, + UCHE_POWER3 = 26, + UCHE_POWER4 = 27, + UCHE_POWER5 = 28, + UCHE_POWER6 = 29, + UCHE_POWER7 = 30, +}; + +enum a4xx_vbif_perfcounter_select { + AXI_READ_REQUESTS_ID_0 = 0, + AXI_READ_REQUESTS_ID_1 = 1, + AXI_READ_REQUESTS_ID_2 = 2, + AXI_READ_REQUESTS_ID_3 = 3, + AXI_READ_REQUESTS_ID_4 = 4, + AXI_READ_REQUESTS_ID_5 = 5, + AXI_READ_REQUESTS_ID_6 = 6, + AXI_READ_REQUESTS_ID_7 = 7, + AXI_READ_REQUESTS_ID_8 = 8, + AXI_READ_REQUESTS_ID_9 = 9, + AXI_READ_REQUESTS_ID_10 = 10, + AXI_READ_REQUESTS_ID_11 = 11, + AXI_READ_REQUESTS_ID_12 = 12, + AXI_READ_REQUESTS_ID_13 = 13, + AXI_READ_REQUESTS_ID_14 = 14, + AXI_READ_REQUESTS_ID_15 = 15, + AXI0_READ_REQUESTS_TOTAL = 16, + AXI1_READ_REQUESTS_TOTAL = 17, + AXI2_READ_REQUESTS_TOTAL = 18, + AXI3_READ_REQUESTS_TOTAL = 19, + AXI_READ_REQUESTS_TOTAL = 20, + AXI_WRITE_REQUESTS_ID_0 = 21, + AXI_WRITE_REQUESTS_ID_1 = 22, + AXI_WRITE_REQUESTS_ID_2 = 23, + AXI_WRITE_REQUESTS_ID_3 = 24, + AXI_WRITE_REQUESTS_ID_4 = 25, + AXI_WRITE_REQUESTS_ID_5 = 26, + AXI_WRITE_REQUESTS_ID_6 = 27, + AXI_WRITE_REQUESTS_ID_7 = 28, + AXI_WRITE_REQUESTS_ID_8 = 29, + AXI_WRITE_REQUESTS_ID_9 = 30, + AXI_WRITE_REQUESTS_ID_10 = 31, + AXI_WRITE_REQUESTS_ID_11 = 32, + AXI_WRITE_REQUESTS_ID_12 = 33, + AXI_WRITE_REQUESTS_ID_13 = 34, + AXI_WRITE_REQUESTS_ID_14 = 35, + AXI_WRITE_REQUESTS_ID_15 = 36, + AXI0_WRITE_REQUESTS_TOTAL = 37, + AXI1_WRITE_REQUESTS_TOTAL = 38, + AXI2_WRITE_REQUESTS_TOTAL = 39, + AXI3_WRITE_REQUESTS_TOTAL = 40, + AXI_WRITE_REQUESTS_TOTAL = 41, + AXI_TOTAL_REQUESTS = 42, + AXI_READ_DATA_BEATS_ID_0 = 43, + AXI_READ_DATA_BEATS_ID_1 = 44, + AXI_READ_DATA_BEATS_ID_2 = 45, + AXI_READ_DATA_BEATS_ID_3 = 46, + AXI_READ_DATA_BEATS_ID_4 = 
47, + AXI_READ_DATA_BEATS_ID_5 = 48, + AXI_READ_DATA_BEATS_ID_6 = 49, + AXI_READ_DATA_BEATS_ID_7 = 50, + AXI_READ_DATA_BEATS_ID_8 = 51, + AXI_READ_DATA_BEATS_ID_9 = 52, + AXI_READ_DATA_BEATS_ID_10 = 53, + AXI_READ_DATA_BEATS_ID_11 = 54, + AXI_READ_DATA_BEATS_ID_12 = 55, + AXI_READ_DATA_BEATS_ID_13 = 56, + AXI_READ_DATA_BEATS_ID_14 = 57, + AXI_READ_DATA_BEATS_ID_15 = 58, + AXI0_READ_DATA_BEATS_TOTAL = 59, + AXI1_READ_DATA_BEATS_TOTAL = 60, + AXI2_READ_DATA_BEATS_TOTAL = 61, + AXI3_READ_DATA_BEATS_TOTAL = 62, + AXI_READ_DATA_BEATS_TOTAL = 63, + AXI_WRITE_DATA_BEATS_ID_0 = 64, + AXI_WRITE_DATA_BEATS_ID_1 = 65, + AXI_WRITE_DATA_BEATS_ID_2 = 66, + AXI_WRITE_DATA_BEATS_ID_3 = 67, + AXI_WRITE_DATA_BEATS_ID_4 = 68, + AXI_WRITE_DATA_BEATS_ID_5 = 69, + AXI_WRITE_DATA_BEATS_ID_6 = 70, + AXI_WRITE_DATA_BEATS_ID_7 = 71, + AXI_WRITE_DATA_BEATS_ID_8 = 72, + AXI_WRITE_DATA_BEATS_ID_9 = 73, + AXI_WRITE_DATA_BEATS_ID_10 = 74, + AXI_WRITE_DATA_BEATS_ID_11 = 75, + AXI_WRITE_DATA_BEATS_ID_12 = 76, + AXI_WRITE_DATA_BEATS_ID_13 = 77, + AXI_WRITE_DATA_BEATS_ID_14 = 78, + AXI_WRITE_DATA_BEATS_ID_15 = 79, + AXI0_WRITE_DATA_BEATS_TOTAL = 80, + AXI1_WRITE_DATA_BEATS_TOTAL = 81, + AXI2_WRITE_DATA_BEATS_TOTAL = 82, + AXI3_WRITE_DATA_BEATS_TOTAL = 83, + AXI_WRITE_DATA_BEATS_TOTAL = 84, + AXI_DATA_BEATS_TOTAL = 85, + CYCLES_HELD_OFF_ID_0 = 86, + CYCLES_HELD_OFF_ID_1 = 87, + CYCLES_HELD_OFF_ID_2 = 88, + CYCLES_HELD_OFF_ID_3 = 89, + CYCLES_HELD_OFF_ID_4 = 90, + CYCLES_HELD_OFF_ID_5 = 91, + CYCLES_HELD_OFF_ID_6 = 92, + CYCLES_HELD_OFF_ID_7 = 93, + CYCLES_HELD_OFF_ID_8 = 94, + CYCLES_HELD_OFF_ID_9 = 95, + CYCLES_HELD_OFF_ID_10 = 96, + CYCLES_HELD_OFF_ID_11 = 97, + CYCLES_HELD_OFF_ID_12 = 98, + CYCLES_HELD_OFF_ID_13 = 99, + CYCLES_HELD_OFF_ID_14 = 100, + CYCLES_HELD_OFF_ID_15 = 101, + AXI_READ_REQUEST_HELD_OFF = 102, + AXI_WRITE_REQUEST_HELD_OFF = 103, + AXI_REQUEST_HELD_OFF = 104, + AXI_WRITE_DATA_HELD_OFF = 105, + OCMEM_AXI_READ_REQUEST_HELD_OFF = 106, + OCMEM_AXI_WRITE_REQUEST_HELD_OFF = 107, + OCMEM_AXI_REQUEST_HELD_OFF = 108, + OCMEM_AXI_WRITE_DATA_HELD_OFF = 109, + ELAPSED_CYCLES_DDR = 110, + ELAPSED_CYCLES_OCMEM = 111, +}; + +enum a4xx_vfd_perfcounter_select { + VFD_UCHE_BYTE_FETCHED = 0, + VFD_UCHE_TRANS = 1, + VFD_FETCH_INSTRUCTIONS = 3, + VFD_BUSY_CYCLES = 5, + VFD_STALL_CYCLES_UCHE = 6, + VFD_STALL_CYCLES_HLSQ = 7, + VFD_STALL_CYCLES_VPC_BYPASS = 8, + VFD_STALL_CYCLES_VPC_ALLOC = 9, + VFD_MODE_0_FIBERS = 13, + VFD_MODE_1_FIBERS = 14, + VFD_MODE_2_FIBERS = 15, + VFD_MODE_3_FIBERS = 16, + VFD_MODE_4_FIBERS = 17, + VFD_BFIFO_STALL = 18, + VFD_NUM_VERTICES_TOTAL = 19, + VFD_PACKER_FULL = 20, + VFD_UCHE_REQUEST_FIFO_FULL = 21, + VFD_STARVE_CYCLES_PC = 22, + VFD_STARVE_CYCLES_UCHE = 23, +}; + +enum a4xx_vpc_perfcounter_select { + VPC_SP_LM_COMPONENTS = 2, + VPC_SP0_LM_BYTES = 3, + VPC_SP1_LM_BYTES = 4, + VPC_SP2_LM_BYTES = 5, + VPC_SP3_LM_BYTES = 6, + VPC_WORKING_CYCLES = 7, + VPC_STALL_CYCLES_LM = 8, + VPC_STARVE_CYCLES_RAS = 9, + VPC_STREAMOUT_CYCLES = 10, + VPC_UCHE_TRANSACTIONS = 12, + VPC_STALL_CYCLES_UCHE = 13, + VPC_BUSY_CYCLES = 14, + VPC_STARVE_CYCLES_SP = 15, +}; + +enum a4xx_vsc_perfcounter_select { + VSC_BUSY_CYCLES = 0, + VSC_WORKING_CYCLES = 1, + VSC_STALL_CYCLES_UCHE = 2, + VSC_STARVE_CYCLES_RAS = 3, + VSC_EOT_NUM = 4, +}; + enum a4xx_tex_filter { A4XX_TEX_NEAREST = 0, A4XX_TEX_LINEAR = 1, @@ -326,6 +897,12 @@ static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val) #define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce +#define REG_A4XX_RB_PERFCTR_CCU_SEL_0 0x00000ccf + +#define REG_A4XX_RB_PERFCTR_CCU_SEL_1 
0x00000cd0 + +#define REG_A4XX_RB_PERFCTR_CCU_SEL_2 0x00000cd1 + #define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2 #define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0 @@ -400,8 +977,13 @@ static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4 #define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008 #define A4XX_RB_MRT_CONTROL_BLEND 0x00000010 #define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020 -#define A4XX_RB_MRT_CONTROL_FASTCLEAR 0x00000400 -#define A4XX_RB_MRT_CONTROL_B11 0x00000800 +#define A4XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000040 +#define A4XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00 +#define A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8 +static inline uint32_t A4XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val) +{ + return ((val) << A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A4XX_RB_MRT_CONTROL_ROP_CODE__MASK; +} #define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000 #define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24 static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val) @@ -490,8 +1072,8 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK; } -#define REG_A4XX_RB_BLEND_RED 0x000020f3 -#define A4XX_RB_BLEND_RED_UINT__MASK 0x00007fff +#define REG_A4XX_RB_BLEND_RED 0x000020f0 +#define A4XX_RB_BLEND_RED_UINT__MASK 0x0000ffff #define A4XX_RB_BLEND_RED_UINT__SHIFT 0 static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val) { @@ -504,8 +1086,16 @@ static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val) return ((util_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK; } -#define REG_A4XX_RB_BLEND_GREEN 0x000020f4 -#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x00007fff +#define REG_A4XX_RB_BLEND_RED_F32 0x000020f1 +#define A4XX_RB_BLEND_RED_F32__MASK 0xffffffff +#define A4XX_RB_BLEND_RED_F32__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_RED_F32(float val) +{ + return ((fui(val)) << A4XX_RB_BLEND_RED_F32__SHIFT) & A4XX_RB_BLEND_RED_F32__MASK; +} + +#define REG_A4XX_RB_BLEND_GREEN 0x000020f2 +#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x0000ffff #define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0 static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val) { @@ -518,8 +1108,16 @@ static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val) return ((util_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK; } -#define REG_A4XX_RB_BLEND_BLUE 0x000020f5 -#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x00007fff +#define REG_A4XX_RB_BLEND_GREEN_F32 0x000020f3 +#define A4XX_RB_BLEND_GREEN_F32__MASK 0xffffffff +#define A4XX_RB_BLEND_GREEN_F32__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val) +{ + return ((fui(val)) << A4XX_RB_BLEND_GREEN_F32__SHIFT) & A4XX_RB_BLEND_GREEN_F32__MASK; +} + +#define REG_A4XX_RB_BLEND_BLUE 0x000020f4 +#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x0000ffff #define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0 static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val) { @@ -532,8 +1130,16 @@ static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val) return ((util_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK; } +#define REG_A4XX_RB_BLEND_BLUE_F32 0x000020f5 +#define A4XX_RB_BLEND_BLUE_F32__MASK 0xffffffff +#define A4XX_RB_BLEND_BLUE_F32__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val) +{ + return ((fui(val)) << A4XX_RB_BLEND_BLUE_F32__SHIFT) & A4XX_RB_BLEND_BLUE_F32__MASK; 
+} + #define REG_A4XX_RB_BLEND_ALPHA 0x000020f6 -#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x00007fff +#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x0000ffff #define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0 static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val) { @@ -546,6 +1152,14 @@ static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val) return ((util_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK; } +#define REG_A4XX_RB_BLEND_ALPHA_F32 0x000020f7 +#define A4XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff +#define A4XX_RB_BLEND_ALPHA_F32__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_ALPHA_F32(float val) +{ + return ((fui(val)) << A4XX_RB_BLEND_ALPHA_F32__SHIFT) & A4XX_RB_BLEND_ALPHA_F32__MASK; +} + #define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8 #define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff #define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0 @@ -568,7 +1182,7 @@ static inline uint32_t A4XX_RB_FS_OUTPUT_ENABLE_BLEND(uint32_t val) { return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK; } -#define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100 +#define A4XX_RB_FS_OUTPUT_INDEPENDENT_BLEND 0x00000100 #define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000 #define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16 static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val) @@ -736,6 +1350,7 @@ static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val) } #define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080 #define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000 +#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000 #define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000 #define REG_A4XX_RB_DEPTH_CLEAR 0x00002102 @@ -996,8 +1611,386 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x #define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d +#define REG_A4XX_RBBM_POWER_CNTL_IP 0x00000098 +#define A4XX_RBBM_POWER_CNTL_IP_SW_COLLAPSE 0x00000001 +#define A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON 0x00100000 + #define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c +#define REG_A4XX_RBBM_PERFCTR_CP_0_HI 0x0000009d + +#define REG_A4XX_RBBM_PERFCTR_CP_1_LO 0x0000009e + +#define REG_A4XX_RBBM_PERFCTR_CP_1_HI 0x0000009f + +#define REG_A4XX_RBBM_PERFCTR_CP_2_LO 0x000000a0 + +#define REG_A4XX_RBBM_PERFCTR_CP_2_HI 0x000000a1 + +#define REG_A4XX_RBBM_PERFCTR_CP_3_LO 0x000000a2 + +#define REG_A4XX_RBBM_PERFCTR_CP_3_HI 0x000000a3 + +#define REG_A4XX_RBBM_PERFCTR_CP_4_LO 0x000000a4 + +#define REG_A4XX_RBBM_PERFCTR_CP_4_HI 0x000000a5 + +#define REG_A4XX_RBBM_PERFCTR_CP_5_LO 0x000000a6 + +#define REG_A4XX_RBBM_PERFCTR_CP_5_HI 0x000000a7 + +#define REG_A4XX_RBBM_PERFCTR_CP_6_LO 0x000000a8 + +#define REG_A4XX_RBBM_PERFCTR_CP_6_HI 0x000000a9 + +#define REG_A4XX_RBBM_PERFCTR_CP_7_LO 0x000000aa + +#define REG_A4XX_RBBM_PERFCTR_CP_7_HI 0x000000ab + +#define REG_A4XX_RBBM_PERFCTR_RBBM_0_LO 0x000000ac + +#define REG_A4XX_RBBM_PERFCTR_RBBM_0_HI 0x000000ad + +#define REG_A4XX_RBBM_PERFCTR_RBBM_1_LO 0x000000ae + +#define REG_A4XX_RBBM_PERFCTR_RBBM_1_HI 0x000000af + +#define REG_A4XX_RBBM_PERFCTR_RBBM_2_LO 0x000000b0 + +#define REG_A4XX_RBBM_PERFCTR_RBBM_2_HI 0x000000b1 + +#define REG_A4XX_RBBM_PERFCTR_RBBM_3_LO 0x000000b2 + +#define REG_A4XX_RBBM_PERFCTR_RBBM_3_HI 0x000000b3 + +#define REG_A4XX_RBBM_PERFCTR_PC_0_LO 0x000000b4 + +#define REG_A4XX_RBBM_PERFCTR_PC_0_HI 0x000000b5 + +#define REG_A4XX_RBBM_PERFCTR_PC_1_LO 0x000000b6 + +#define REG_A4XX_RBBM_PERFCTR_PC_1_HI 0x000000b7 + +#define REG_A4XX_RBBM_PERFCTR_PC_2_LO 
0x000000b8 + +#define REG_A4XX_RBBM_PERFCTR_PC_2_HI 0x000000b9 + +#define REG_A4XX_RBBM_PERFCTR_PC_3_LO 0x000000ba + +#define REG_A4XX_RBBM_PERFCTR_PC_3_HI 0x000000bb + +#define REG_A4XX_RBBM_PERFCTR_PC_4_LO 0x000000bc + +#define REG_A4XX_RBBM_PERFCTR_PC_4_HI 0x000000bd + +#define REG_A4XX_RBBM_PERFCTR_PC_5_LO 0x000000be + +#define REG_A4XX_RBBM_PERFCTR_PC_5_HI 0x000000bf + +#define REG_A4XX_RBBM_PERFCTR_PC_6_LO 0x000000c0 + +#define REG_A4XX_RBBM_PERFCTR_PC_6_HI 0x000000c1 + +#define REG_A4XX_RBBM_PERFCTR_PC_7_LO 0x000000c2 + +#define REG_A4XX_RBBM_PERFCTR_PC_7_HI 0x000000c3 + +#define REG_A4XX_RBBM_PERFCTR_VFD_0_LO 0x000000c4 + +#define REG_A4XX_RBBM_PERFCTR_VFD_0_HI 0x000000c5 + +#define REG_A4XX_RBBM_PERFCTR_VFD_1_LO 0x000000c6 + +#define REG_A4XX_RBBM_PERFCTR_VFD_1_HI 0x000000c7 + +#define REG_A4XX_RBBM_PERFCTR_VFD_2_LO 0x000000c8 + +#define REG_A4XX_RBBM_PERFCTR_VFD_2_HI 0x000000c9 + +#define REG_A4XX_RBBM_PERFCTR_VFD_3_LO 0x000000ca + +#define REG_A4XX_RBBM_PERFCTR_VFD_3_HI 0x000000cb + +#define REG_A4XX_RBBM_PERFCTR_VFD_4_LO 0x000000cc + +#define REG_A4XX_RBBM_PERFCTR_VFD_4_HI 0x000000cd + +#define REG_A4XX_RBBM_PERFCTR_VFD_5_LO 0x000000ce + +#define REG_A4XX_RBBM_PERFCTR_VFD_5_HI 0x000000cf + +#define REG_A4XX_RBBM_PERFCTR_VFD_6_LO 0x000000d0 + +#define REG_A4XX_RBBM_PERFCTR_VFD_6_HI 0x000000d1 + +#define REG_A4XX_RBBM_PERFCTR_VFD_7_LO 0x000000d2 + +#define REG_A4XX_RBBM_PERFCTR_VFD_7_HI 0x000000d3 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000d4 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000d5 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000d6 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000d7 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000d8 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000d9 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000da + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000db + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000dc + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000dd + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000de + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000df + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_LO 0x000000e0 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_HI 0x000000e1 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_LO 0x000000e2 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_HI 0x000000e3 + +#define REG_A4XX_RBBM_PERFCTR_VPC_0_LO 0x000000e4 + +#define REG_A4XX_RBBM_PERFCTR_VPC_0_HI 0x000000e5 + +#define REG_A4XX_RBBM_PERFCTR_VPC_1_LO 0x000000e6 + +#define REG_A4XX_RBBM_PERFCTR_VPC_1_HI 0x000000e7 + +#define REG_A4XX_RBBM_PERFCTR_VPC_2_LO 0x000000e8 + +#define REG_A4XX_RBBM_PERFCTR_VPC_2_HI 0x000000e9 + +#define REG_A4XX_RBBM_PERFCTR_VPC_3_LO 0x000000ea + +#define REG_A4XX_RBBM_PERFCTR_VPC_3_HI 0x000000eb + +#define REG_A4XX_RBBM_PERFCTR_CCU_0_LO 0x000000ec + +#define REG_A4XX_RBBM_PERFCTR_CCU_0_HI 0x000000ed + +#define REG_A4XX_RBBM_PERFCTR_CCU_1_LO 0x000000ee + +#define REG_A4XX_RBBM_PERFCTR_CCU_1_HI 0x000000ef + +#define REG_A4XX_RBBM_PERFCTR_CCU_2_LO 0x000000f0 + +#define REG_A4XX_RBBM_PERFCTR_CCU_2_HI 0x000000f1 + +#define REG_A4XX_RBBM_PERFCTR_CCU_3_LO 0x000000f2 + +#define REG_A4XX_RBBM_PERFCTR_CCU_3_HI 0x000000f3 + +#define REG_A4XX_RBBM_PERFCTR_TSE_0_LO 0x000000f4 + +#define REG_A4XX_RBBM_PERFCTR_TSE_0_HI 0x000000f5 + +#define REG_A4XX_RBBM_PERFCTR_TSE_1_LO 0x000000f6 + +#define REG_A4XX_RBBM_PERFCTR_TSE_1_HI 0x000000f7 + +#define REG_A4XX_RBBM_PERFCTR_TSE_2_LO 0x000000f8 + +#define REG_A4XX_RBBM_PERFCTR_TSE_2_HI 0x000000f9 + +#define REG_A4XX_RBBM_PERFCTR_TSE_3_LO 0x000000fa + +#define 
REG_A4XX_RBBM_PERFCTR_TSE_3_HI 0x000000fb + +#define REG_A4XX_RBBM_PERFCTR_RAS_0_LO 0x000000fc + +#define REG_A4XX_RBBM_PERFCTR_RAS_0_HI 0x000000fd + +#define REG_A4XX_RBBM_PERFCTR_RAS_1_LO 0x000000fe + +#define REG_A4XX_RBBM_PERFCTR_RAS_1_HI 0x000000ff + +#define REG_A4XX_RBBM_PERFCTR_RAS_2_LO 0x00000100 + +#define REG_A4XX_RBBM_PERFCTR_RAS_2_HI 0x00000101 + +#define REG_A4XX_RBBM_PERFCTR_RAS_3_LO 0x00000102 + +#define REG_A4XX_RBBM_PERFCTR_RAS_3_HI 0x00000103 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_0_LO 0x00000104 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_0_HI 0x00000105 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_1_LO 0x00000106 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_1_HI 0x00000107 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_2_LO 0x00000108 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_2_HI 0x00000109 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_3_LO 0x0000010a + +#define REG_A4XX_RBBM_PERFCTR_UCHE_3_HI 0x0000010b + +#define REG_A4XX_RBBM_PERFCTR_UCHE_4_LO 0x0000010c + +#define REG_A4XX_RBBM_PERFCTR_UCHE_4_HI 0x0000010d + +#define REG_A4XX_RBBM_PERFCTR_UCHE_5_LO 0x0000010e + +#define REG_A4XX_RBBM_PERFCTR_UCHE_5_HI 0x0000010f + +#define REG_A4XX_RBBM_PERFCTR_UCHE_6_LO 0x00000110 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_6_HI 0x00000111 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_7_LO 0x00000112 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_7_HI 0x00000113 + +#define REG_A4XX_RBBM_PERFCTR_TP_0_LO 0x00000114 + +#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115 + +#define REG_A4XX_RBBM_PERFCTR_TP_0_LO 0x00000114 + +#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115 + +#define REG_A4XX_RBBM_PERFCTR_TP_1_LO 0x00000116 + +#define REG_A4XX_RBBM_PERFCTR_TP_1_HI 0x00000117 + +#define REG_A4XX_RBBM_PERFCTR_TP_2_LO 0x00000118 + +#define REG_A4XX_RBBM_PERFCTR_TP_2_HI 0x00000119 + +#define REG_A4XX_RBBM_PERFCTR_TP_3_LO 0x0000011a + +#define REG_A4XX_RBBM_PERFCTR_TP_3_HI 0x0000011b + +#define REG_A4XX_RBBM_PERFCTR_TP_4_LO 0x0000011c + +#define REG_A4XX_RBBM_PERFCTR_TP_4_HI 0x0000011d + +#define REG_A4XX_RBBM_PERFCTR_TP_5_LO 0x0000011e + +#define REG_A4XX_RBBM_PERFCTR_TP_5_HI 0x0000011f + +#define REG_A4XX_RBBM_PERFCTR_TP_6_LO 0x00000120 + +#define REG_A4XX_RBBM_PERFCTR_TP_6_HI 0x00000121 + +#define REG_A4XX_RBBM_PERFCTR_TP_7_LO 0x00000122 + +#define REG_A4XX_RBBM_PERFCTR_TP_7_HI 0x00000123 + +#define REG_A4XX_RBBM_PERFCTR_SP_0_LO 0x00000124 + +#define REG_A4XX_RBBM_PERFCTR_SP_0_HI 0x00000125 + +#define REG_A4XX_RBBM_PERFCTR_SP_1_LO 0x00000126 + +#define REG_A4XX_RBBM_PERFCTR_SP_1_HI 0x00000127 + +#define REG_A4XX_RBBM_PERFCTR_SP_2_LO 0x00000128 + +#define REG_A4XX_RBBM_PERFCTR_SP_2_HI 0x00000129 + +#define REG_A4XX_RBBM_PERFCTR_SP_3_LO 0x0000012a + +#define REG_A4XX_RBBM_PERFCTR_SP_3_HI 0x0000012b + +#define REG_A4XX_RBBM_PERFCTR_SP_4_LO 0x0000012c + +#define REG_A4XX_RBBM_PERFCTR_SP_4_HI 0x0000012d + +#define REG_A4XX_RBBM_PERFCTR_SP_5_LO 0x0000012e + +#define REG_A4XX_RBBM_PERFCTR_SP_5_HI 0x0000012f + +#define REG_A4XX_RBBM_PERFCTR_SP_6_LO 0x00000130 + +#define REG_A4XX_RBBM_PERFCTR_SP_6_HI 0x00000131 + +#define REG_A4XX_RBBM_PERFCTR_SP_7_LO 0x00000132 + +#define REG_A4XX_RBBM_PERFCTR_SP_7_HI 0x00000133 + +#define REG_A4XX_RBBM_PERFCTR_SP_8_LO 0x00000134 + +#define REG_A4XX_RBBM_PERFCTR_SP_8_HI 0x00000135 + +#define REG_A4XX_RBBM_PERFCTR_SP_9_LO 0x00000136 + +#define REG_A4XX_RBBM_PERFCTR_SP_9_HI 0x00000137 + +#define REG_A4XX_RBBM_PERFCTR_SP_10_LO 0x00000138 + +#define REG_A4XX_RBBM_PERFCTR_SP_10_HI 0x00000139 + +#define REG_A4XX_RBBM_PERFCTR_SP_11_LO 0x0000013a + +#define REG_A4XX_RBBM_PERFCTR_SP_11_HI 0x0000013b + +#define 
REG_A4XX_RBBM_PERFCTR_RB_0_LO 0x0000013c + +#define REG_A4XX_RBBM_PERFCTR_RB_0_HI 0x0000013d + +#define REG_A4XX_RBBM_PERFCTR_RB_1_LO 0x0000013e + +#define REG_A4XX_RBBM_PERFCTR_RB_1_HI 0x0000013f + +#define REG_A4XX_RBBM_PERFCTR_RB_2_LO 0x00000140 + +#define REG_A4XX_RBBM_PERFCTR_RB_2_HI 0x00000141 + +#define REG_A4XX_RBBM_PERFCTR_RB_3_LO 0x00000142 + +#define REG_A4XX_RBBM_PERFCTR_RB_3_HI 0x00000143 + +#define REG_A4XX_RBBM_PERFCTR_RB_4_LO 0x00000144 + +#define REG_A4XX_RBBM_PERFCTR_RB_4_HI 0x00000145 + +#define REG_A4XX_RBBM_PERFCTR_RB_5_LO 0x00000146 + +#define REG_A4XX_RBBM_PERFCTR_RB_5_HI 0x00000147 + +#define REG_A4XX_RBBM_PERFCTR_RB_6_LO 0x00000148 + +#define REG_A4XX_RBBM_PERFCTR_RB_6_HI 0x00000149 + +#define REG_A4XX_RBBM_PERFCTR_RB_7_LO 0x0000014a + +#define REG_A4XX_RBBM_PERFCTR_RB_7_HI 0x0000014b + +#define REG_A4XX_RBBM_PERFCTR_VSC_0_LO 0x0000014c + +#define REG_A4XX_RBBM_PERFCTR_VSC_0_HI 0x0000014d + +#define REG_A4XX_RBBM_PERFCTR_VSC_1_LO 0x0000014e + +#define REG_A4XX_RBBM_PERFCTR_VSC_1_HI 0x0000014f + +#define REG_A4XX_RBBM_PERFCTR_PWR_0_LO 0x00000166 + +#define REG_A4XX_RBBM_PERFCTR_PWR_0_HI 0x00000167 + +#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168 + +#define REG_A4XX_RBBM_PERFCTR_PWR_1_HI 0x00000169 + +#define REG_A4XX_RBBM_ALWAYSON_COUNTER_LO 0x0000016e + +#define REG_A4XX_RBBM_ALWAYSON_COUNTER_HI 0x0000016f + static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; } static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; } @@ -1046,6 +2039,10 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { r static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; } +#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0 0x00000099 + +#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1 0x0000009a + #define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168 #define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170 @@ -1060,6 +2057,14 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) #define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175 +#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000176 + +#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000177 + +#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000178 + +#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_3 0x00000179 + #define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a #define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d @@ -1099,6 +2104,11 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) #define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f +#define REG_A4XX_RBBM_POWER_STATUS 0x000001b0 +#define A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON 0x00100000 + +#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2 0x000001b8 + #define REG_A4XX_CP_SCRATCH_UMASK 0x00000228 #define REG_A4XX_CP_SCRATCH_ADDR 0x00000229 @@ -1191,6 +2201,20 @@ static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 #define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500 +#define REG_A4XX_CP_PERFCTR_CP_SEL_1 0x00000501 + +#define REG_A4XX_CP_PERFCTR_CP_SEL_2 0x00000502 + +#define REG_A4XX_CP_PERFCTR_CP_SEL_3 0x00000503 + +#define REG_A4XX_CP_PERFCTR_CP_SEL_4 0x00000504 + +#define REG_A4XX_CP_PERFCTR_CP_SEL_5 0x00000505 + +#define REG_A4XX_CP_PERFCTR_CP_SEL_6 0x00000506 + +#define REG_A4XX_CP_PERFCTR_CP_SEL_7 0x00000507 + #define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; } @@ -1201,6 +2225,28 @@ static inline 
uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578 #define REG_A4XX_SP_MODE_CONTROL 0x00000ec3 +#define REG_A4XX_SP_PERFCTR_SP_SEL_0 0x00000ec4 + +#define REG_A4XX_SP_PERFCTR_SP_SEL_1 0x00000ec5 + +#define REG_A4XX_SP_PERFCTR_SP_SEL_2 0x00000ec6 + +#define REG_A4XX_SP_PERFCTR_SP_SEL_3 0x00000ec7 + +#define REG_A4XX_SP_PERFCTR_SP_SEL_4 0x00000ec8 + +#define REG_A4XX_SP_PERFCTR_SP_SEL_5 0x00000ec9 + +#define REG_A4XX_SP_PERFCTR_SP_SEL_6 0x00000eca + +#define REG_A4XX_SP_PERFCTR_SP_SEL_7 0x00000ecb + +#define REG_A4XX_SP_PERFCTR_SP_SEL_8 0x00000ecc + +#define REG_A4XX_SP_PERFCTR_SP_SEL_9 0x00000ecd + +#define REG_A4XX_SP_PERFCTR_SP_SEL_10 0x00000ece + #define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf #define REG_A4XX_SP_SP_CTRL_REG 0x000022c0 @@ -1699,6 +2745,12 @@ static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) #define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64 +#define REG_A4XX_VPC_PERFCTR_VPC_SEL_0 0x00000e65 + +#define REG_A4XX_VPC_PERFCTR_VPC_SEL_1 0x00000e66 + +#define REG_A4XX_VPC_PERFCTR_VPC_SEL_2 0x00000e67 + #define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68 #define REG_A4XX_VPC_ATTR 0x00002140 @@ -1811,6 +2863,20 @@ static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0 #define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40 +#define REG_A4XX_VFD_PERFCTR_VFD_SEL_0 0x00000e43 + +#define REG_A4XX_VFD_PERFCTR_VFD_SEL_1 0x00000e44 + +#define REG_A4XX_VFD_PERFCTR_VFD_SEL_2 0x00000e45 + +#define REG_A4XX_VFD_PERFCTR_VFD_SEL_3 0x00000e46 + +#define REG_A4XX_VFD_PERFCTR_VFD_SEL_4 0x00000e47 + +#define REG_A4XX_VFD_PERFCTR_VFD_SEL_5 0x00000e48 + +#define REG_A4XX_VFD_PERFCTR_VFD_SEL_6 0x00000e49 + #define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a #define REG_A4XX_VGT_CL_INITIATOR 0x000021d0 @@ -1967,6 +3033,20 @@ static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val) #define REG_A4XX_TPL1_TP_MODE_CONTROL 0x00000f03 +#define REG_A4XX_TPL1_PERFCTR_TP_SEL_0 0x00000f04 + +#define REG_A4XX_TPL1_PERFCTR_TP_SEL_1 0x00000f05 + +#define REG_A4XX_TPL1_PERFCTR_TP_SEL_2 0x00000f06 + +#define REG_A4XX_TPL1_PERFCTR_TP_SEL_3 0x00000f07 + +#define REG_A4XX_TPL1_PERFCTR_TP_SEL_4 0x00000f08 + +#define REG_A4XX_TPL1_PERFCTR_TP_SEL_5 0x00000f09 + +#define REG_A4XX_TPL1_PERFCTR_TP_SEL_6 0x00000f0a + #define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b #define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380 @@ -2021,9 +3101,23 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val) #define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88 +#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c89 + +#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c8a + #define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b +#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c8c + +#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c8d + +#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c8e + +#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c8f + #define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000 +#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000 +#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000 #define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003 #define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001 @@ -2114,6 +3208,7 @@ static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val) #define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073 #define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004 +#define A4XX_GRAS_ALPHA_CONTROL_FORCE_FRAGZ_TO_FS 0x00000008 #define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074 #define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff @@ -2285,6 +3380,20 @@ static inline 
uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val) #define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c +#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e8e + +#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e8f + +#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e90 + +#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e91 + +#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e92 + +#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e93 + +#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e94 + #define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95 #define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00 @@ -2295,6 +3404,22 @@ static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val) #define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e +#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e06 + +#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e07 + +#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e08 + +#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e09 + +#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e0a + +#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e0b + +#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e0c + +#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e0d + #define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0 #define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010 #define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4 @@ -2549,6 +3674,18 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val) #define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10 +#define REG_A4XX_PC_PERFCTR_PC_SEL_1 0x00000d11 + +#define REG_A4XX_PC_PERFCTR_PC_SEL_2 0x00000d12 + +#define REG_A4XX_PC_PERFCTR_PC_SEL_3 0x00000d13 + +#define REG_A4XX_PC_PERFCTR_PC_SEL_4 0x00000d14 + +#define REG_A4XX_PC_PERFCTR_PC_SEL_5 0x00000d15 + +#define REG_A4XX_PC_PERFCTR_PC_SEL_6 0x00000d16 + #define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17 #define REG_A4XX_PC_BIN_BASE 0x000021c0 @@ -2564,7 +3701,20 @@ static inline uint32_t A4XX_PC_PRIM_VTX_CNTL_VAROUT(uint32_t val) #define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000 #define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000 -#define REG_A4XX_UNKNOWN_21C5 0x000021c5 +#define REG_A4XX_PC_PRIM_VTX_CNTL2 0x000021c5 +#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK 0x00000007 +#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT 0 +static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK; +} +#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK 0x00000038 +#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT 3 +static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK; +} +#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_ENABLE 0x00000040 #define REG_A4XX_PC_RESTART_INDEX 0x000021c6 @@ -2646,20 +3796,6 @@ static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val) #define REG_A4XX_UNKNOWN_20EF 0x000020ef -#define REG_A4XX_UNKNOWN_20F0 0x000020f0 - -#define REG_A4XX_UNKNOWN_20F1 0x000020f1 - -#define REG_A4XX_UNKNOWN_20F2 0x000020f2 - -#define REG_A4XX_UNKNOWN_20F7 0x000020f7 -#define A4XX_UNKNOWN_20F7__MASK 0xffffffff -#define A4XX_UNKNOWN_20F7__SHIFT 0 -static inline uint32_t A4XX_UNKNOWN_20F7(float val) -{ - return ((fui(val)) << A4XX_UNKNOWN_20F7__SHIFT) & A4XX_UNKNOWN_20F7__MASK; -} - #define REG_A4XX_UNKNOWN_2152 
0x00002152 #define REG_A4XX_UNKNOWN_2153 0x00002153 @@ -2720,6 +3856,12 @@ static inline uint32_t A4XX_TEX_SAMP_0_ANISO(enum a4xx_tex_aniso val) { return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK; } +#define A4XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000 +#define A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19 +static inline uint32_t A4XX_TEX_SAMP_0_LOD_BIAS(float val) +{ + return ((((int32_t)(val * 256.0))) << A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A4XX_TEX_SAMP_0_LOD_BIAS__MASK; +} #define REG_A4XX_TEX_SAMP_1 0x00000001 #define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e @@ -2728,6 +3870,7 @@ static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val { return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK; } +#define A4XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010 #define A4XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020 #define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040 #define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00 @@ -2796,7 +3939,7 @@ static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val) { return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK; } -#define A4XX_TEX_CONST_1_WIDTH__MASK 0x1fff8000 +#define A4XX_TEX_CONST_1_WIDTH__MASK 0x3fff8000 #define A4XX_TEX_CONST_1_WIDTH__SHIFT 15 static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val) { diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index a53f1be05f75..d0d3c7baa8fe 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -102,11 +102,17 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu) gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222); gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ , 0x00000000); gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000); - gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000); - gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000); + /* Early A430's have a timing issue with SP/TP power collapse; + disabling HW clock gating prevents it. 
*/ + if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2) + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0); + else + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA); gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0); } + static void a4xx_me_init(struct msm_gpu *gpu) { struct msm_ringbuffer *ring = gpu->rb; @@ -141,7 +147,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu) uint32_t *ptr, len; int i, ret; - if (adreno_is_a4xx(adreno_gpu)) { + if (adreno_is_a420(adreno_gpu)) { gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F); gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4); gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001); @@ -150,6 +156,13 @@ static int a4xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818); gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018); gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); + } else if (adreno_is_a430(adreno_gpu)) { + gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001); + gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818); + gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018); + gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818); + gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018); + gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); } else { BUG(); } @@ -161,6 +174,10 @@ static int a4xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10); gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10); + if (adreno_is_a430(adreno_gpu)) { + gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30); + } + /* Enable the RBBM error reporting bits */ gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001); @@ -183,6 +200,14 @@ static int a4xx_hw_init(struct msm_gpu *gpu) /* Turn on performance counters: */ gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01); + /* use the first CP counter for timestamp queries.. userspace may set + * this as well but it selects the same counter/countable: + */ + gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT); + + if (adreno_is_a430(adreno_gpu)) + gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07); + /* Disable L2 bypass to avoid UCHE out of bounds errors */ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000); gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000); @@ -190,6 +215,15 @@ static int a4xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) | (adreno_is_a420(adreno_gpu) ? (1 << 29) : 0)); + /* On A430 enable SP regfile sleep for power savings */ + /* TODO downstream does this for !420, so maybe applies for 405 too? 
*/ + if (!adreno_is_a420(adreno_gpu)) { + gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0, + 0x00000441); + gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1, + 0x00000441); + } + a4xx_enable_hwcg(gpu); /* @@ -204,10 +238,6 @@ static int a4xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val); } - ret = adreno_hw_init(gpu); - if (ret) - return ret; - /* setup access protection: */ gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007); @@ -263,6 +293,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0); a4xx_me_init(gpu); + return 0; } @@ -317,6 +348,13 @@ static irqreturn_t a4xx_irq(struct msm_gpu *gpu) status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS); DBG("%s: Int status %08x", gpu->name, status); + if (status & A4XX_INT0_CP_REG_PROTECT_FAULT) { + uint32_t reg = gpu_read(gpu, REG_A4XX_CP_PROTECT_STATUS); + printk("CP | Protected mode error| %s | addr=%x\n", + reg & (1 << 24) ? "WRITE" : "READ", + (reg & 0xFFFFF) >> 2); + } + gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status); msm_gpu_retire(gpu); @@ -512,12 +550,63 @@ static void a4xx_dump(struct msm_gpu *gpu) adreno_dump(gpu); } +static int a4xx_pm_resume(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int ret; + + ret = msm_gpu_pm_resume(gpu); + if (ret) + return ret; + + if (adreno_is_a430(adreno_gpu)) { + unsigned int reg; + /* Set the default register values; set SW_COLLAPSE to 0 */ + gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000); + do { + udelay(5); + reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS); + } while (!(reg & A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON)); + } + return 0; +} + +static int a4xx_pm_suspend(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int ret; + + ret = msm_gpu_pm_suspend(gpu); + if (ret) + return ret; + + if (adreno_is_a430(adreno_gpu)) { + /* Set the default register values; set SW_COLLAPSE to 1 */ + gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001); + } + return 0; +} + +static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) +{ + uint32_t hi, lo, tmp; + + tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI); + do { + hi = tmp; + lo = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO); + tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI); + } while (tmp != hi); + + *value = (((uint64_t)hi) << 32) | lo; + + return 0; +} + static const struct adreno_gpu_funcs funcs = { .base = { .get_param = adreno_get_param, .hw_init = a4xx_hw_init, - .pm_suspend = msm_gpu_pm_suspend, - .pm_resume = msm_gpu_pm_resume, + .pm_suspend = a4xx_pm_suspend, + .pm_resume = a4xx_pm_resume, .recover = a4xx_recover, .last_fence = adreno_last_fence, .submit = adreno_submit, @@ -529,6 +618,7 @@ static const struct adreno_gpu_funcs funcs = { .show = a4xx_show, #endif }, + .get_timestamp = a4xx_get_timestamp, }; struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h index c304468cf2bd..e81481d1b7df 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h @@ -9,16 +9,17 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- 
/home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48) - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) -Copyright (C) 2013-2015 by the following authors: +Copyright (C) 2013-2016 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the @@ -119,6 +120,23 @@ enum adreno_rb_copy_control_mode { RB_COPY_DEPTH_STENCIL = 5, }; +enum a3xx_rop_code { + ROP_CLEAR = 0, + ROP_NOR = 1, + ROP_AND_INVERTED = 2, + ROP_COPY_INVERTED = 3, + ROP_AND_REVERSE = 4, + ROP_INVERT = 5, + ROP_NAND = 7, + ROP_AND = 8, + ROP_EQUIV = 9, + ROP_NOOP = 10, + ROP_OR_INVERTED = 11, + ROP_OR_REVERSE = 13, + ROP_OR = 14, + ROP_SET = 15, +}; + enum a3xx_render_mode { RB_RENDERING_PASS = 0, RB_TILING_PASS = 1, diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 950d27d26b30..5127b75dbf40 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -69,6 +69,14 @@ static const struct adreno_info gpulist[] = { .pfpfw = "a420_pfp.fw", .gmem = (SZ_1M + SZ_512K), .init = a4xx_gpu_init, + }, { + .rev = ADRENO_REV(4, 3, 0, ANY_ID), + .revn = 430, + .name = "A430", + .pm4fw = "a420_pm4.fw", + .pfpfw = "a420_pfp.fw", + .gmem = (SZ_1M + SZ_512K), + .init = a4xx_gpu_init, }, }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index a3b54cc76495..4951172ede06 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -41,6 +41,13 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) (adreno_gpu->rev.major << 16) | (adreno_gpu->rev.core << 24); return 0; + case MSM_PARAM_MAX_FREQ: + *value = adreno_gpu->base.fast_rate; + return 0; + case MSM_PARAM_TIMESTAMP: + if (adreno_gpu->funcs->get_timestamp) + return adreno_gpu->funcs->get_timestamp(gpu, value); + return -EINVAL; default: DBG("%s: invalid param: %u", gpu->name, param); return -EINVAL; @@ -68,18 +75,15 @@ int adreno_hw_init(struct msm_gpu *gpu) adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL, /* size is log2(quad-words): */ AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) | - AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8))); + AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) | + (adreno_is_a430(adreno_gpu) ? 
AXXX_CP_RB_CNTL_NO_UPDATE : 0)); /* Setup ringbuffer address: */ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova); - adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR, - rbmemptr(adreno_gpu, rptr)); - - /* Setup scratch/timestamp: */ - adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR, - rbmemptr(adreno_gpu, fence)); - adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1); + if (!adreno_is_a430(adreno_gpu)) + adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR, + rbmemptr(adreno_gpu, rptr)); return 0; } @@ -89,6 +93,16 @@ static uint32_t get_wptr(struct msm_ringbuffer *ring) return ring->cur - ring->start; } +/* Use this helper to read rptr, since a430 doesn't update rptr in memory */ +static uint32_t get_rptr(struct adreno_gpu *adreno_gpu) +{ + if (adreno_is_a430(adreno_gpu)) + return adreno_gpu->memptrs->rptr = adreno_gpu_read( + adreno_gpu, REG_ADRENO_CP_RB_RPTR); + else + return adreno_gpu->memptrs->rptr; +} + uint32_t adreno_last_fence(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -137,7 +151,8 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, if (priv->lastctx == ctx) break; case MSM_SUBMIT_CMD_BUF: - OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2); + OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ? + CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); OUT_RING(ring, submit->cmd[i].iova); OUT_RING(ring, submit->cmd[i].size); ibs++; @@ -216,9 +231,12 @@ void adreno_idle(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); uint32_t wptr = get_wptr(gpu->rb); + int ret; /* wait for CP to drain ringbuffer: */ - if (spin_until(adreno_gpu->memptrs->rptr == wptr)) + ret = spin_until(get_rptr(adreno_gpu) == wptr); + + if (ret) DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name); /* TODO maybe we need to reset GPU here to recover from hang? 
*/ @@ -237,7 +255,7 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence, gpu->submitted_fence); - seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr); + seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu)); seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr); seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); @@ -278,7 +296,7 @@ void adreno_dump_info(struct msm_gpu *gpu) printk("fence: %d/%d\n", adreno_gpu->memptrs->fence, gpu->submitted_fence); - printk("rptr: %d\n", adreno_gpu->memptrs->rptr); + printk("rptr: %d\n", get_rptr(adreno_gpu)); printk("wptr: %d\n", adreno_gpu->memptrs->wptr); printk("rb wptr: %d\n", get_wptr(gpu->rb)); @@ -313,7 +331,7 @@ static uint32_t ring_freewords(struct msm_gpu *gpu) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); uint32_t size = gpu->rb->size / 4; uint32_t wptr = get_wptr(gpu->rb); - uint32_t rptr = adreno_gpu->memptrs->rptr; + uint32_t rptr = get_rptr(adreno_gpu); return (rptr + (size - 1) - wptr) % size; } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 0a312e9d3afd..1d07511f4d22 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -114,6 +114,7 @@ struct adreno_rev { struct adreno_gpu_funcs { struct msm_gpu_funcs base; + int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value); }; struct adreno_info { @@ -228,6 +229,11 @@ static inline int adreno_is_a420(struct adreno_gpu *gpu) return gpu->revn == 420; } +static inline int adreno_is_a430(struct adreno_gpu *gpu) +{ + return gpu->revn == 430; +} + int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); int adreno_hw_init(struct msm_gpu *gpu); uint32_t adreno_last_fence(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h index a22fef569499..d7477ff867c9 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h @@ -9,16 +9,17 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48) - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 
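The ring_freewords() change above keeps the existing free-space formula but now feeds it the rptr obtained via get_rptr(). The arithmetic deliberately keeps one slot unused so that rptr == wptr always means "empty" rather than "full". A short sketch of that formula with a worked example in the comment (values are purely illustrative; size, rptr and wptr are in dwords as in the driver):

	/* Sketch of the ring free-space arithmetic; one slot stays unused
	 * so rptr == wptr is unambiguously "empty".
	 */
	static uint32_t ring_free(uint32_t rptr, uint32_t wptr, uint32_t size)
	{
		/* e.g. size=8, rptr=2, wptr=6: (2 + 7 - 6) % 8 = 3 free dwords */
		return (rptr + (size - 1) - wptr) % size;
	}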
bytes, from 2015-09-24 17:30:00) -Copyright (C) 2013-2015 by the following authors: +Copyright (C) 2013-2016 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the @@ -172,6 +173,11 @@ enum adreno_pm4_type3_packets { CP_UNKNOWN_1A = 26, CP_UNKNOWN_4E = 78, CP_WIDE_REG_WRITE = 116, + CP_SCRATCH_TO_REG = 77, + CP_REG_TO_SCRATCH = 74, + CP_WAIT_MEM_WRITES = 18, + CP_COND_REG_EXEC = 71, + CP_MEM_TO_REG = 66, IN_IB_PREFETCH_END = 23, IN_SUBBLK_PREFETCH = 31, IN_INSTR_PREFETCH = 32, @@ -199,7 +205,11 @@ enum adreno_state_type { enum adreno_state_src { SS_DIRECT = 0, + SS_INVALID_ALL_IC = 2, + SS_INVALID_PART_IC = 3, SS_INDIRECT = 4, + SS_INDIRECT_TCM = 5, + SS_INDIRECT_STM = 6, }; enum a4xx_index_size { @@ -227,7 +237,7 @@ static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val) { return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK; } -#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0x7fc00000 +#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0xffc00000 #define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22 static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val) { @@ -499,5 +509,29 @@ static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val) return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK; } +#define REG_CP_REG_TO_MEM_0 0x00000000 +#define CP_REG_TO_MEM_0_REG__MASK 0x0000ffff +#define CP_REG_TO_MEM_0_REG__SHIFT 0 +static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK; +} +#define CP_REG_TO_MEM_0_CNT__MASK 0x3ff80000 +#define CP_REG_TO_MEM_0_CNT__SHIFT 19 +static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK; +} +#define CP_REG_TO_MEM_0_64B 0x40000000 +#define CP_REG_TO_MEM_0_ACCUMULATE 0x80000000 + +#define REG_CP_REG_TO_MEM_1 0x00000001 +#define CP_REG_TO_MEM_1_DEST__MASK 0xffffffff +#define CP_REG_TO_MEM_1_DEST__SHIFT 0 +static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK; +} + #endif /* ADRENO_PM4_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index b2b5f3dd1b4c..4958594d5266 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) @@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - 
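The CP_REG_TO_MEM_0_*/CP_REG_TO_MEM_1_* helpers added to adreno_pm4.xml.h above are generated shift-and-mask packers; each one clamps its argument to the field's mask and shifts it into place, and the results are OR'd into a packet payload dword. A mechanical sketch of how they compose, assuming hypothetical reg_offset, count and dest_addr values; the exact field semantics (for example whether CNT is a count or count-minus-one, or when the 64B/ACCUMULATE bits apply) are defined by the CP firmware and are not asserted here, nor is the surrounding packet-header emission shown:

	/* Illustrative packing of the CP_REG_TO_MEM payload dwords. */
	uint32_t dword0 = CP_REG_TO_MEM_0_REG(reg_offset) |
			  CP_REG_TO_MEM_0_CNT(count);
	uint32_t dword1 = CP_REG_TO_MEM_1_DEST(dest_addr);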
/home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index 2a827d8093a2..e58e9b91b34d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -57,10 +57,9 @@ static const char * const dsi_8916_bus_clk_names[] = { static const struct msm_dsi_config msm8916_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, .reg_cfg = { - .num = 4, + .num = 3, .regs = { {"gdsc", -1, -1, -1, -1}, - {"vdd", 2850000, 2850000, 100000, 100}, {"vdda", 1200000, 1200000, 100000, 100}, {"vddio", 1800000, 1800000, 100000, 100}, }, diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 48f9967b4a1b..4282ec6bbaaf 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -163,6 +163,10 @@ struct msm_dsi_host { enum mipi_dsi_pixel_format format; unsigned long mode_flags; + /* lane data parsed via DT */ + int dlane_swap; + int num_data_lanes; + u32 dma_cmd_ctrl_restore; bool registered; @@ -845,19 +849,10 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, data = DSI_CTRL_CLK_EN; DBG("lane number=%d", msm_host->lanes); - if (msm_host->lanes == 2) { - data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2; - /* swap lanes for 2-lane panel for better performance */ - dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL, - DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230)); - } else { - /* Take 4 lanes as default */ - data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 | - DSI_CTRL_LANE3; - /* Do not swap lanes for 4-lane panel */ - dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL, - DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123)); - } + data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0); + + dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL, + DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap)); if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) dsi_write(msm_host, REG_DSI_LANE_CTRL, @@ -1479,13 +1474,14 @@ static int dsi_host_attach(struct mipi_dsi_host *host, struct msm_dsi_host *msm_host = to_msm_dsi_host(host); int ret; + if (dsi->lanes > msm_host->num_data_lanes) + return -EINVAL; + msm_host->channel = dsi->channel; msm_host->lanes = dsi->lanes; msm_host->format = dsi->format; msm_host->mode_flags = dsi->mode_flags; - WARN_ON(dsi->dev.of_node != msm_host->device_node); - /* Some gpios defined in panel DT need to be controlled by host */ ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); if (ret) @@ -1534,6 +1530,75 @@ static struct mipi_dsi_host_ops dsi_host_ops = { .transfer = dsi_host_transfer, }; +/* + * List of supported physical to logical lane mappings. 
+ * For example, the 2nd entry represents the following mapping: + * + * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3; + */ +static const int supported_data_lane_swaps[][4] = { + { 0, 1, 2, 3 }, + { 3, 0, 1, 2 }, + { 2, 3, 0, 1 }, + { 1, 2, 3, 0 }, + { 0, 3, 2, 1 }, + { 1, 0, 3, 2 }, + { 2, 1, 0, 3 }, + { 3, 2, 1, 0 }, +}; + +static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, + struct device_node *ep) +{ + struct device *dev = &msm_host->pdev->dev; + struct property *prop; + u32 lane_map[4]; + int ret, i, len, num_lanes; + + prop = of_find_property(ep, "qcom,data-lane-map", &len); + if (!prop) { + dev_dbg(dev, "failed to find data lane mapping\n"); + return -EINVAL; + } + + num_lanes = len / sizeof(u32); + + if (num_lanes < 1 || num_lanes > 4) { + dev_err(dev, "bad number of data lanes\n"); + return -EINVAL; + } + + msm_host->num_data_lanes = num_lanes; + + ret = of_property_read_u32_array(ep, "qcom,data-lane-map", lane_map, + num_lanes); + if (ret) { + dev_err(dev, "failed to read lane data\n"); + return ret; + } + + /* + * compare DT specified physical-logical lane mappings with the ones + * supported by hardware + */ + for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) { + const int *swap = supported_data_lane_swaps[i]; + int j; + + for (j = 0; j < num_lanes; j++) { + if (swap[j] != lane_map[j]) + break; + } + + if (j == num_lanes) { + msm_host->dlane_swap = i; + return 0; + } + } + + return -EINVAL; +} + static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) { struct device *dev = &msm_host->pdev->dev; @@ -1560,17 +1625,21 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) return 0; } + ret = dsi_host_parse_lane_data(msm_host, endpoint); + if (ret) { + dev_err(dev, "%s: invalid lane configuration %d\n", + __func__, ret); + goto err; + } + /* Get panel node from the output port's endpoint data */ device_node = of_graph_get_remote_port_parent(endpoint); if (!device_node) { dev_err(dev, "%s: no valid device\n", __func__); - of_node_put(endpoint); - return -ENODEV; + ret = -ENODEV; + goto err; } - of_node_put(endpoint); - of_node_put(device_node); - msm_host->device_node = device_node; if (of_property_read_bool(np, "syscon-sfpb")) { @@ -1579,11 +1648,16 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) if (IS_ERR(msm_host->sfpb)) { dev_err(dev, "%s: failed to get sfpb regmap\n", __func__); - return PTR_ERR(msm_host->sfpb); + ret = PTR_ERR(msm_host->sfpb); } } - return 0; + of_node_put(device_node); + +err: + of_node_put(endpoint); + + return ret; } int msm_dsi_host_init(struct msm_dsi *msm_dsi) diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h index 80ec65e47468..2d999494cdea 100644 --- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h +++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h @@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) - 
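Two details in the DSI host changes above are easy to miss. First, the parsed dlane_swap index is exactly what dsi_ctrl_config() now programs into DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL, replacing the old hard-coded 2-lane/4-lane cases. Second, the lane-enable expression ((DSI_CTRL_LANE0 << lanes) - DSI_CTRL_LANE0) relies on DSI_CTRL_LANE0..LANE3 being consecutive bits in DSI_CTRL, so it sets exactly the lowest 'lanes' lane bits. A small sketch of that bit trick; the 0x10 value in the comment is only an illustrative position for LANE0, not a claim about the real register layout:

	/* Sketch: build an enable mask for n consecutive lane bits. */
	static uint32_t lane_enable_mask(uint32_t lane0_bit, int num_lanes)
	{
		/* e.g. lane0_bit = 0x10, num_lanes = 2:
		 * (0x10 << 2) - 0x10 = 0x30, i.e. LANE0 | LANE1
		 */
		return (lane0_bit << num_lanes) - lane0_bit;
	}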
/home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) @@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h index 80b6038334a6..2cf1664723e8 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h @@ -97,8 +97,8 @@ static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init( struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev, int id); #else -struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev, - int id) +static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init( + struct platform_device *pdev, int id) { return ERR_PTR(-ENODEV); } diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h index 7d7662e69e11..506434fac993 100644 --- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h +++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h @@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) @@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a 
copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h index 90bf5ed46746..f1072c18c81e 100644 --- a/drivers/gpu/drm/msm/edp/edp.xml.h +++ b/drivers/gpu/drm/msm/edp/edp.xml.h @@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) @@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 9a0989c0b4de..51b9ea552f97 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -21,7 +21,7 @@ #include "hdmi.h" -void hdmi_set_mode(struct hdmi *hdmi, bool power_on) +void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on) { uint32_t ctrl = 0; unsigned long flags; @@ -46,29 +46,27 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on) power_on ? "Enable" : "Disable", ctrl); } -static irqreturn_t hdmi_irq(int irq, void *dev_id) +static irqreturn_t msm_hdmi_irq(int irq, void *dev_id) { struct hdmi *hdmi = dev_id; /* Process HPD: */ - hdmi_connector_irq(hdmi->connector); + msm_hdmi_connector_irq(hdmi->connector); /* Process DDC: */ - hdmi_i2c_irq(hdmi->i2c); + msm_hdmi_i2c_irq(hdmi->i2c); /* Process HDCP: */ if (hdmi->hdcp_ctrl) - hdmi_hdcp_irq(hdmi->hdcp_ctrl); + msm_hdmi_hdcp_irq(hdmi->hdcp_ctrl); /* TODO audio.. 
*/ return IRQ_HANDLED; } -static void hdmi_destroy(struct hdmi *hdmi) +static void msm_hdmi_destroy(struct hdmi *hdmi) { - struct hdmi_phy *phy = hdmi->phy; - /* * at this point, hpd has been disabled, * after flush workq, it's safe to deinit hdcp @@ -77,21 +75,53 @@ static void hdmi_destroy(struct hdmi *hdmi) flush_workqueue(hdmi->workq); destroy_workqueue(hdmi->workq); } - hdmi_hdcp_destroy(hdmi); - if (phy) - phy->funcs->destroy(phy); + msm_hdmi_hdcp_destroy(hdmi); + + if (hdmi->phy_dev) { + put_device(hdmi->phy_dev); + hdmi->phy = NULL; + hdmi->phy_dev = NULL; + } if (hdmi->i2c) - hdmi_i2c_destroy(hdmi->i2c); + msm_hdmi_i2c_destroy(hdmi->i2c); platform_set_drvdata(hdmi->pdev, NULL); } +static int msm_hdmi_get_phy(struct hdmi *hdmi) +{ + struct platform_device *pdev = hdmi->pdev; + struct platform_device *phy_pdev; + struct device_node *phy_node; + + phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0); + if (!phy_node) { + dev_err(&pdev->dev, "cannot find phy device\n"); + return -ENXIO; + } + + phy_pdev = of_find_device_by_node(phy_node); + if (phy_pdev) + hdmi->phy = platform_get_drvdata(phy_pdev); + + of_node_put(phy_node); + + if (!phy_pdev || !hdmi->phy) { + dev_err(&pdev->dev, "phy driver is not ready\n"); + return -EPROBE_DEFER; + } + + hdmi->phy_dev = get_device(&phy_pdev->dev); + + return 0; +} + /* construct hdmi at bind/probe time, grab all the resources. If * we are to EPROBE_DEFER we want to do it here, rather than later * at modeset_init() time */ -static struct hdmi *hdmi_init(struct platform_device *pdev) +static struct hdmi *msm_hdmi_init(struct platform_device *pdev) { struct hdmi_platform_config *config = pdev->dev.platform_data; struct hdmi *hdmi = NULL; @@ -108,18 +138,6 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) hdmi->config = config; spin_lock_init(&hdmi->reg_lock); - /* not sure about which phy maps to which msm.. probably I miss some */ - if (config->phy_init) { - hdmi->phy = config->phy_init(hdmi); - - if (IS_ERR(hdmi->phy)) { - ret = PTR_ERR(hdmi->phy); - dev_err(&pdev->dev, "failed to load phy: %d\n", ret); - hdmi->phy = NULL; - goto fail; - } - } - hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI"); if (IS_ERR(hdmi->mmio)) { ret = PTR_ERR(hdmi->mmio); @@ -222,7 +240,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0); - hdmi->i2c = hdmi_i2c_init(hdmi); + hdmi->i2c = msm_hdmi_i2c_init(hdmi); if (IS_ERR(hdmi->i2c)) { ret = PTR_ERR(hdmi->i2c); dev_err(&pdev->dev, "failed to get i2c: %d\n", ret); @@ -230,7 +248,13 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) goto fail; } - hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi); + ret = msm_hdmi_get_phy(hdmi); + if (ret) { + dev_err(&pdev->dev, "failed to get phy\n"); + goto fail; + } + + hdmi->hdcp_ctrl = msm_hdmi_hdcp_init(hdmi); if (IS_ERR(hdmi->hdcp_ctrl)) { dev_warn(&pdev->dev, "failed to init hdcp: disabled\n"); hdmi->hdcp_ctrl = NULL; @@ -240,7 +264,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) fail: if (hdmi) - hdmi_destroy(hdmi); + msm_hdmi_destroy(hdmi); return ERR_PTR(ret); } @@ -250,10 +274,10 @@ fail: * driver (not hdmi sub-device's probe/bind!) * * Any resource (regulator/clk/etc) which could be missing at boot - * should be handled in hdmi_init() so that failure happens from + * should be handled in msm_hdmi_init() so that failure happens from * hdmi sub-device's probe. 
*/ -int hdmi_modeset_init(struct hdmi *hdmi, +int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev, struct drm_encoder *encoder) { struct msm_drm_private *priv = dev->dev_private; @@ -265,7 +289,7 @@ int hdmi_modeset_init(struct hdmi *hdmi, hdmi_audio_infoframe_init(&hdmi->audio.infoframe); - hdmi->bridge = hdmi_bridge_init(hdmi); + hdmi->bridge = msm_hdmi_bridge_init(hdmi); if (IS_ERR(hdmi->bridge)) { ret = PTR_ERR(hdmi->bridge); dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret); @@ -273,7 +297,7 @@ int hdmi_modeset_init(struct hdmi *hdmi, goto fail; } - hdmi->connector = hdmi_connector_init(hdmi); + hdmi->connector = msm_hdmi_connector_init(hdmi); if (IS_ERR(hdmi->connector)) { ret = PTR_ERR(hdmi->connector); dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret); @@ -289,7 +313,7 @@ int hdmi_modeset_init(struct hdmi *hdmi, } ret = devm_request_irq(&pdev->dev, hdmi->irq, - hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "hdmi_isr", hdmi); if (ret < 0) { dev_err(dev->dev, "failed to request IRQ%u: %d\n", @@ -309,7 +333,7 @@ int hdmi_modeset_init(struct hdmi *hdmi, fail: /* bridge is normally destroyed by drm: */ if (hdmi->bridge) { - hdmi_bridge_destroy(hdmi->bridge); + msm_hdmi_bridge_destroy(hdmi->bridge); hdmi->bridge = NULL; } if (hdmi->connector) { @@ -331,15 +355,12 @@ fail: static const char *pwr_reg_names_none[] = {}; static const char *hpd_reg_names_none[] = {}; -static struct hdmi_platform_config hdmi_tx_8660_config = { - .phy_init = hdmi_phy_8x60_init, -}; +static struct hdmi_platform_config hdmi_tx_8660_config; static const char *hpd_reg_names_8960[] = {"core-vdda", "hdmi-mux"}; static const char *hpd_clk_names_8960[] = {"core_clk", "master_iface_clk", "slave_iface_clk"}; static struct hdmi_platform_config hdmi_tx_8960_config = { - .phy_init = hdmi_phy_8960_init, HDMI_CFG(hpd_reg, 8960), HDMI_CFG(hpd_clk, 8960), }; @@ -351,7 +372,6 @@ static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_cl static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0}; static struct hdmi_platform_config hdmi_tx_8974_config = { - .phy_init = hdmi_phy_8x74_init, HDMI_CFG(pwr_reg, 8x74), HDMI_CFG(hpd_reg, 8x74), HDMI_CFG(pwr_clk, 8x74), @@ -362,7 +382,6 @@ static struct hdmi_platform_config hdmi_tx_8974_config = { static const char *hpd_reg_names_8084[] = {"hpd-gdsc", "hpd-5v", "hpd-5v-en"}; static struct hdmi_platform_config hdmi_tx_8084_config = { - .phy_init = hdmi_phy_8x74_init, HDMI_CFG(pwr_reg, 8x74), HDMI_CFG(hpd_reg, 8084), HDMI_CFG(pwr_clk, 8x74), @@ -371,7 +390,6 @@ static struct hdmi_platform_config hdmi_tx_8084_config = { }; static struct hdmi_platform_config hdmi_tx_8994_config = { - .phy_init = NULL, /* nothing to do for this HDMI PHY 20nm */ HDMI_CFG(pwr_reg, 8x74), HDMI_CFG(hpd_reg, none), HDMI_CFG(pwr_clk, 8x74), @@ -380,7 +398,6 @@ static struct hdmi_platform_config hdmi_tx_8994_config = { }; static struct hdmi_platform_config hdmi_tx_8996_config = { - .phy_init = NULL, HDMI_CFG(pwr_reg, none), HDMI_CFG(hpd_reg, none), HDMI_CFG(pwr_clk, 8x74), @@ -388,7 +405,21 @@ static struct hdmi_platform_config hdmi_tx_8996_config = { .hpd_freq = hpd_clk_freq_8x74, }; -static int get_gpio(struct device *dev, struct device_node *of_node, const char *name) +static const struct { + const char *name; + const bool output; + const int value; + const char *label; +} msm_hdmi_gpio_pdata[] = { + { "qcom,hdmi-tx-ddc-clk", true, 1, "HDMI_DDC_CLK" }, + { "qcom,hdmi-tx-ddc-data", true, 1, 
"HDMI_DDC_DATA" }, + { "qcom,hdmi-tx-hpd", false, 1, "HDMI_HPD" }, + { "qcom,hdmi-tx-mux-en", true, 1, "HDMI_MUX_EN" }, + { "qcom,hdmi-tx-mux-sel", true, 0, "HDMI_MUX_SEL" }, + { "qcom,hdmi-tx-mux-lpm", true, 1, "HDMI_MUX_LPM" }, +}; + +static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name) { int gpio = of_get_named_gpio(of_node, name, 0); if (gpio < 0) { @@ -403,13 +434,14 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char return gpio; } -static int hdmi_bind(struct device *dev, struct device *master, void *data) +static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = dev_get_drvdata(master); struct msm_drm_private *priv = drm->dev_private; static struct hdmi_platform_config *hdmi_cfg; struct hdmi *hdmi; struct device_node *of_node = dev->of_node; + int i; hdmi_cfg = (struct hdmi_platform_config *) of_device_get_match_data(dev); @@ -420,16 +452,18 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) hdmi_cfg->mmio_name = "core_physical"; hdmi_cfg->qfprom_mmio_name = "qfprom_physical"; - hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk"); - hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data"); - hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd"); - hdmi_cfg->mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en"); - hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel"); - hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm"); + + for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { + hdmi_cfg->gpios[i].num = msm_hdmi_get_gpio(of_node, + msm_hdmi_gpio_pdata[i].name); + hdmi_cfg->gpios[i].output = msm_hdmi_gpio_pdata[i].output; + hdmi_cfg->gpios[i].value = msm_hdmi_gpio_pdata[i].value; + hdmi_cfg->gpios[i].label = msm_hdmi_gpio_pdata[i].label; + } dev->platform_data = hdmi_cfg; - hdmi = hdmi_init(to_platform_device(dev)); + hdmi = msm_hdmi_init(to_platform_device(dev)); if (IS_ERR(hdmi)) return PTR_ERR(hdmi); priv->hdmi = hdmi; @@ -437,34 +471,34 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) return 0; } -static void hdmi_unbind(struct device *dev, struct device *master, +static void msm_hdmi_unbind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = dev_get_drvdata(master); struct msm_drm_private *priv = drm->dev_private; if (priv->hdmi) { - hdmi_destroy(priv->hdmi); + msm_hdmi_destroy(priv->hdmi); priv->hdmi = NULL; } } -static const struct component_ops hdmi_ops = { - .bind = hdmi_bind, - .unbind = hdmi_unbind, +static const struct component_ops msm_hdmi_ops = { + .bind = msm_hdmi_bind, + .unbind = msm_hdmi_unbind, }; -static int hdmi_dev_probe(struct platform_device *pdev) +static int msm_hdmi_dev_probe(struct platform_device *pdev) { - return component_add(&pdev->dev, &hdmi_ops); + return component_add(&pdev->dev, &msm_hdmi_ops); } -static int hdmi_dev_remove(struct platform_device *pdev) +static int msm_hdmi_dev_remove(struct platform_device *pdev) { - component_del(&pdev->dev, &hdmi_ops); + component_del(&pdev->dev, &msm_hdmi_ops); return 0; } -static const struct of_device_id dt_match[] = { +static const struct of_device_id msm_hdmi_dt_match[] = { { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config }, { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config }, { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config }, @@ -474,21 +508,23 @@ static const struct of_device_id 
dt_match[] = { {} }; -static struct platform_driver hdmi_driver = { - .probe = hdmi_dev_probe, - .remove = hdmi_dev_remove, +static struct platform_driver msm_hdmi_driver = { + .probe = msm_hdmi_dev_probe, + .remove = msm_hdmi_dev_remove, .driver = { .name = "hdmi_msm", - .of_match_table = dt_match, + .of_match_table = msm_hdmi_dt_match, }, }; -void __init hdmi_register(void) +void __init msm_hdmi_register(void) { - platform_driver_register(&hdmi_driver); + msm_hdmi_phy_driver_register(); + platform_driver_register(&msm_hdmi_driver); } -void __exit hdmi_unregister(void) +void __exit msm_hdmi_unregister(void) { - platform_driver_unregister(&hdmi_driver); + platform_driver_unregister(&msm_hdmi_driver); + msm_hdmi_phy_driver_unregister(); } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index d0e663192d01..b04a64664673 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -27,10 +27,18 @@ #include "msm_drv.h" #include "hdmi.xml.h" +#define HDMI_MAX_NUM_GPIO 6 struct hdmi_phy; struct hdmi_platform_config; +struct hdmi_gpio_data { + int num; + bool output; + int value; + const char *label; +}; + struct hdmi_audio { bool enabled; struct hdmi_audio_infoframe infoframe; @@ -62,6 +70,8 @@ struct hdmi { struct clk **pwr_clks; struct hdmi_phy *phy; + struct device *phy_dev; + struct i2c_adapter *i2c; struct drm_connector *connector; struct drm_bridge *bridge; @@ -88,7 +98,6 @@ struct hdmi { /* platform config data (ie. from DT, or pdata) */ struct hdmi_platform_config { - struct hdmi_phy *(*phy_init)(struct hdmi *hdmi); const char *mmio_name; const char *qfprom_mmio_name; @@ -110,11 +119,10 @@ struct hdmi_platform_config { int pwr_clk_cnt; /* gpio's: */ - int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio; - int mux_lpm_gpio; + struct hdmi_gpio_data gpios[HDMI_MAX_NUM_GPIO]; }; -void hdmi_set_mode(struct hdmi *hdmi, bool power_on); +void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on); static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data) { @@ -132,65 +140,113 @@ static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg) } /* - * The phy appears to be different, for example between 8960 and 8x60, - * so split the phy related functions out and load the correct one at - * runtime: + * hdmi phy: */ -struct hdmi_phy_funcs { - void (*destroy)(struct hdmi_phy *phy); +enum hdmi_phy_type { + MSM_HDMI_PHY_8x60, + MSM_HDMI_PHY_8960, + MSM_HDMI_PHY_8x74, + MSM_HDMI_PHY_8996, + MSM_HDMI_PHY_MAX, +}; + +struct hdmi_phy_cfg { + enum hdmi_phy_type type; void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock); void (*powerdown)(struct hdmi_phy *phy); + const char * const *reg_names; + int num_regs; + const char * const *clk_names; + int num_clks; }; +extern const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg; +extern const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg; +extern const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg; +extern const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg; + struct hdmi_phy { + struct platform_device *pdev; + void __iomem *mmio; + struct hdmi_phy_cfg *cfg; const struct hdmi_phy_funcs *funcs; + struct regulator **regs; + struct clk **clks; }; -struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi); -struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi); -struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi); +static inline void hdmi_phy_write(struct hdmi_phy *phy, u32 reg, u32 data) +{ + msm_writel(data, phy->mmio + reg); +} + +static inline u32 hdmi_phy_read(struct hdmi_phy *phy, u32 
reg) +{ + return msm_readl(phy->mmio + reg); +} + +int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy); +void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy); +void msm_hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock); +void msm_hdmi_phy_powerdown(struct hdmi_phy *phy); +void __init msm_hdmi_phy_driver_register(void); +void __exit msm_hdmi_phy_driver_unregister(void); + +#ifdef CONFIG_COMMON_CLK +int msm_hdmi_pll_8960_init(struct platform_device *pdev); +int msm_hdmi_pll_8996_init(struct platform_device *pdev); +#else +static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev); +{ + return -ENODEV; +} + +static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev) +{ + return -ENODEV; +} +#endif /* * audio: */ -int hdmi_audio_update(struct hdmi *hdmi); -int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled, +int msm_hdmi_audio_update(struct hdmi *hdmi); +int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled, uint32_t num_of_channels, uint32_t channel_allocation, uint32_t level_shift, bool down_mix); -void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate); +void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate); /* * hdmi bridge: */ -struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi); -void hdmi_bridge_destroy(struct drm_bridge *bridge); +struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi); +void msm_hdmi_bridge_destroy(struct drm_bridge *bridge); /* * hdmi connector: */ -void hdmi_connector_irq(struct drm_connector *connector); -struct drm_connector *hdmi_connector_init(struct hdmi *hdmi); +void msm_hdmi_connector_irq(struct drm_connector *connector); +struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi); /* * i2c adapter for ddc: */ -void hdmi_i2c_irq(struct i2c_adapter *i2c); -void hdmi_i2c_destroy(struct i2c_adapter *i2c); -struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi); +void msm_hdmi_i2c_irq(struct i2c_adapter *i2c); +void msm_hdmi_i2c_destroy(struct i2c_adapter *i2c); +struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi); /* * hdcp */ -struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi); -void hdmi_hdcp_destroy(struct hdmi *hdmi); -void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl); -void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl); -void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl); +struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi); +void msm_hdmi_hdcp_destroy(struct hdmi *hdmi); +void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl); +void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl); +void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl); #endif /* __HDMI_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h index 10c45700aefe..34c7df6549c1 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h @@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) - 
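One nit in the hdmi.h hunk above: the !CONFIG_COMMON_CLK stub for msm_hdmi_pll_8960_init() carries a stray semicolon between the declarator and its body, which would not compile when CONFIG_COMMON_CLK is disabled. The intended static-inline stub pattern is the one the neighbouring 8996 stub already follows; for clarity, the corrected 8960 form would read:

	static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
	{
		return -ENODEV;
	}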
/home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) @@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) -Copyright (C) 2013-2015 by the following authors: +Copyright (C) 2013-2016 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the @@ -559,7 +560,7 @@ static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val) #define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370 -#define REG_HDMI_8x60_PHY_REG0 0x00000300 +#define REG_HDMI_8x60_PHY_REG0 0x00000000 #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2 static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val) @@ -567,7 +568,7 @@ static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val) return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK; } -#define REG_HDMI_8x60_PHY_REG1 0x00000304 +#define REG_HDMI_8x60_PHY_REG1 0x00000004 #define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0 #define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4 static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val) @@ -581,7 +582,7 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK; } -#define REG_HDMI_8x60_PHY_REG2 0x00000308 +#define REG_HDMI_8x60_PHY_REG2 0x00000008 #define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001 #define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002 #define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004 @@ -591,152 +592,152 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) #define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040 #define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080 -#define REG_HDMI_8x60_PHY_REG3 0x0000030c +#define REG_HDMI_8x60_PHY_REG3 0x0000000c #define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001 -#define REG_HDMI_8x60_PHY_REG4 0x00000310 +#define REG_HDMI_8x60_PHY_REG4 0x00000010 -#define REG_HDMI_8x60_PHY_REG5 0x00000314 +#define REG_HDMI_8x60_PHY_REG5 0x00000014 -#define REG_HDMI_8x60_PHY_REG6 0x00000318 +#define REG_HDMI_8x60_PHY_REG6 0x00000018 -#define REG_HDMI_8x60_PHY_REG7 0x0000031c +#define REG_HDMI_8x60_PHY_REG7 0x0000001c -#define REG_HDMI_8x60_PHY_REG8 0x00000320 +#define REG_HDMI_8x60_PHY_REG8 0x00000020 -#define REG_HDMI_8x60_PHY_REG9 0x00000324 +#define REG_HDMI_8x60_PHY_REG9 0x00000024 -#define REG_HDMI_8x60_PHY_REG10 0x00000328 +#define REG_HDMI_8x60_PHY_REG10 0x00000028 -#define REG_HDMI_8x60_PHY_REG11 0x0000032c +#define REG_HDMI_8x60_PHY_REG11 0x0000002c -#define REG_HDMI_8x60_PHY_REG12 0x00000330 +#define REG_HDMI_8x60_PHY_REG12 0x00000030 #define 
HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001 #define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 0x00000002 #define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010 -#define REG_HDMI_8960_PHY_REG0 0x00000400 +#define REG_HDMI_8960_PHY_REG0 0x00000000 -#define REG_HDMI_8960_PHY_REG1 0x00000404 +#define REG_HDMI_8960_PHY_REG1 0x00000004 -#define REG_HDMI_8960_PHY_REG2 0x00000408 +#define REG_HDMI_8960_PHY_REG2 0x00000008 -#define REG_HDMI_8960_PHY_REG3 0x0000040c +#define REG_HDMI_8960_PHY_REG3 0x0000000c -#define REG_HDMI_8960_PHY_REG4 0x00000410 +#define REG_HDMI_8960_PHY_REG4 0x00000010 -#define REG_HDMI_8960_PHY_REG5 0x00000414 +#define REG_HDMI_8960_PHY_REG5 0x00000014 -#define REG_HDMI_8960_PHY_REG6 0x00000418 +#define REG_HDMI_8960_PHY_REG6 0x00000018 -#define REG_HDMI_8960_PHY_REG7 0x0000041c +#define REG_HDMI_8960_PHY_REG7 0x0000001c -#define REG_HDMI_8960_PHY_REG8 0x00000420 +#define REG_HDMI_8960_PHY_REG8 0x00000020 -#define REG_HDMI_8960_PHY_REG9 0x00000424 +#define REG_HDMI_8960_PHY_REG9 0x00000024 -#define REG_HDMI_8960_PHY_REG10 0x00000428 +#define REG_HDMI_8960_PHY_REG10 0x00000028 -#define REG_HDMI_8960_PHY_REG11 0x0000042c +#define REG_HDMI_8960_PHY_REG11 0x0000002c -#define REG_HDMI_8960_PHY_REG12 0x00000430 +#define REG_HDMI_8960_PHY_REG12 0x00000030 #define HDMI_8960_PHY_REG12_SW_RESET 0x00000020 #define HDMI_8960_PHY_REG12_PWRDN_B 0x00000080 -#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000434 +#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000034 -#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000438 +#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000038 -#define REG_HDMI_8960_PHY_REG_MISC0 0x0000043c +#define REG_HDMI_8960_PHY_REG_MISC0 0x0000003c -#define REG_HDMI_8960_PHY_REG13 0x00000440 +#define REG_HDMI_8960_PHY_REG13 0x00000040 -#define REG_HDMI_8960_PHY_REG14 0x00000444 +#define REG_HDMI_8960_PHY_REG14 0x00000044 -#define REG_HDMI_8960_PHY_REG15 0x00000448 +#define REG_HDMI_8960_PHY_REG15 0x00000048 -#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000500 +#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000000 -#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000504 +#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000004 -#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000508 +#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000008 -#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000050c +#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000000c -#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000510 +#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000010 -#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000514 +#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000014 -#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000518 +#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000018 #define HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL 0x00000002 #define HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B 0x00000008 -#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000051c +#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000001c -#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000520 +#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000020 -#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000524 +#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000024 -#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000528 +#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000028 -#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000052c +#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000002c -#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000530 +#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000030 -#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000534 +#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000034 -#define 
REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000538 +#define REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000038 -#define REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000053c +#define REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000003c -#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000540 +#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000040 -#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000544 +#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000044 -#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000548 +#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000048 -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000054c +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000004c -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000550 +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000050 -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000554 +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000054 -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000558 +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000058 -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000055c +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000005c -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000560 +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000060 -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000564 +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000064 -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000568 +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000068 -#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000056c +#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000006c -#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000570 +#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000070 -#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000574 +#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000074 -#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000578 +#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000078 -#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000057c +#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000007c -#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000580 +#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000080 -#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000584 +#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000084 -#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000588 +#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000088 -#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000058c +#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000008c -#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000590 +#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000090 -#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000594 +#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000094 -#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000598 +#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000098 #define HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK 0x00000001 -#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000059c +#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000009c #define REG_HDMI_8x74_ANA_CFG0 0x00000000 @@ -843,5 +844,501 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) #define REG_HDMI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0 +#define REG_HDMI_8996_PHY_CFG 0x00000000 + +#define REG_HDMI_8996_PHY_PD_CTL 0x00000004 + +#define REG_HDMI_8996_PHY_MODE 0x00000008 + +#define REG_HDMI_8996_PHY_MISR_CLEAR 0x0000000c + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG0 0x00000010 + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG1 0x00000014 + +#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE0 0x00000018 + +#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE1 0x0000001c + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN0 0x00000020 + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN1 0x00000024 + +#define REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG0 0x00000028 + +#define 
REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG1 0x0000002c + +#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE0 0x00000030 + +#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE1 0x00000034 + +#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN0 0x00000038 + +#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN1 0x0000003c + +#define REG_HDMI_8996_PHY_DEBUG_BUS_SEL 0x00000040 + +#define REG_HDMI_8996_PHY_TXCAL_CFG0 0x00000044 + +#define REG_HDMI_8996_PHY_TXCAL_CFG1 0x00000048 + +#define REG_HDMI_8996_PHY_TX0_TX1_LANE_CTL 0x0000004c + +#define REG_HDMI_8996_PHY_TX2_TX3_LANE_CTL 0x00000050 + +#define REG_HDMI_8996_PHY_LANE_BIST_CONFIG 0x00000054 + +#define REG_HDMI_8996_PHY_CLOCK 0x00000058 + +#define REG_HDMI_8996_PHY_MISC1 0x0000005c + +#define REG_HDMI_8996_PHY_MISC2 0x00000060 + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS0 0x00000064 + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS1 0x00000068 + +#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS2 0x0000006c + +#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS0 0x00000070 + +#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS1 0x00000074 + +#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS2 0x00000078 + +#define REG_HDMI_8996_PHY_PRE_MISR_STATUS0 0x0000007c + +#define REG_HDMI_8996_PHY_PRE_MISR_STATUS1 0x00000080 + +#define REG_HDMI_8996_PHY_PRE_MISR_STATUS2 0x00000084 + +#define REG_HDMI_8996_PHY_PRE_MISR_STATUS3 0x00000088 + +#define REG_HDMI_8996_PHY_POST_MISR_STATUS0 0x0000008c + +#define REG_HDMI_8996_PHY_POST_MISR_STATUS1 0x00000090 + +#define REG_HDMI_8996_PHY_POST_MISR_STATUS2 0x00000094 + +#define REG_HDMI_8996_PHY_POST_MISR_STATUS3 0x00000098 + +#define REG_HDMI_8996_PHY_STATUS 0x0000009c + +#define REG_HDMI_8996_PHY_MISC3_STATUS 0x000000a0 + +#define REG_HDMI_8996_PHY_MISC4_STATUS 0x000000a4 + +#define REG_HDMI_8996_PHY_DEBUG_BUS0 0x000000a8 + +#define REG_HDMI_8996_PHY_DEBUG_BUS1 0x000000ac + +#define REG_HDMI_8996_PHY_DEBUG_BUS2 0x000000b0 + +#define REG_HDMI_8996_PHY_DEBUG_BUS3 0x000000b4 + +#define REG_HDMI_8996_PHY_PHY_REVISION_ID0 0x000000b8 + +#define REG_HDMI_8996_PHY_PHY_REVISION_ID1 0x000000bc + +#define REG_HDMI_8996_PHY_PHY_REVISION_ID2 0x000000c0 + +#define REG_HDMI_8996_PHY_PHY_REVISION_ID3 0x000000c4 + +#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL1 0x00000000 + +#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL2 0x00000004 + +#define REG_HDMI_PHY_QSERDES_COM_FREQ_UPDATE 0x00000008 + +#define REG_HDMI_PHY_QSERDES_COM_BG_TIMER 0x0000000c + +#define REG_HDMI_PHY_QSERDES_COM_SSC_EN_CENTER 0x00000010 + +#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER1 0x00000014 + +#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER2 0x00000018 + +#define REG_HDMI_PHY_QSERDES_COM_SSC_PER1 0x0000001c + +#define REG_HDMI_PHY_QSERDES_COM_SSC_PER2 0x00000020 + +#define REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE1 0x00000024 + +#define REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE2 0x00000028 + +#define REG_HDMI_PHY_QSERDES_COM_POST_DIV 0x0000002c + +#define REG_HDMI_PHY_QSERDES_COM_POST_DIV_MUX 0x00000030 + +#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x00000034 + +#define REG_HDMI_PHY_QSERDES_COM_CLK_ENABLE1 0x00000038 + +#define REG_HDMI_PHY_QSERDES_COM_SYS_CLK_CTRL 0x0000003c + +#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE 0x00000040 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_EN 0x00000044 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_IVCO 0x00000048 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0 0x0000004c + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0 0x00000050 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0 0x00000054 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE1 0x00000058 + 
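The 8x60 and 8960 PHY/PLL register offsets earlier in this header drop their old 0x300/0x400/0x500 bases and become zero-based, which is consistent with the new struct hdmi_phy carrying its own mmio mapping and being accessed through the hdmi_phy_write()/hdmi_phy_read() helpers added in hdmi.h rather than through the HDMI core's register space. A minimal illustrative access, assuming 'phy' is an already-probed struct hdmi_phy; the value written is only there to show the accessor, not a real power-up sequence:

	/* Illustrative only: PHY registers are now offsets into phy->mmio. */
	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12,
		       HDMI_8960_PHY_REG12_SW_RESET | HDMI_8960_PHY_REG12_PWRDN_B);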
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE1 0x0000005c + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE1 0x00000060 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE2 0x00000064 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD0 0x00000064 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE2 0x00000068 + +#define REG_HDMI_PHY_QSERDES_COM_EP_CLOCK_DETECT_CTRL 0x00000068 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE2 0x0000006c + +#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_DET_COMP_STATUS 0x0000006c + +#define REG_HDMI_PHY_QSERDES_COM_BG_TRIM 0x00000070 + +#define REG_HDMI_PHY_QSERDES_COM_CLK_EP_DIV 0x00000074 + +#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE0 0x00000078 + +#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE1 0x0000007c + +#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE2 0x00000080 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD1 0x00000080 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE0 0x00000084 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE1 0x00000088 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE2 0x0000008c + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD2 0x0000008c + +#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE0 0x00000090 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE1 0x00000094 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE2 0x00000098 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD3 0x00000098 + +#define REG_HDMI_PHY_QSERDES_COM_PLL_CNTRL 0x0000009c + +#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_CTRL 0x000000a0 + +#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_DC 0x000000a4 + +#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_IN_SYNC_SEL 0x000000a8 + +#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x000000a8 + +#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_EN_SEL 0x000000ac + +#define REG_HDMI_PHY_QSERDES_COM_CML_SYSCLK_SEL 0x000000b0 + +#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL 0x000000b4 + +#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL2 0x000000b8 + +#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL 0x000000bc + +#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL2 0x000000c0 + +#define REG_HDMI_PHY_QSERDES_COM_RESCODE_DIV_NUM 0x000000c4 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_EN 0x000000c8 + +#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_CFG 0x000000cc + +#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE0 0x000000d0 + +#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE1 0x000000d4 + +#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE2 0x000000d8 + +#define REG_HDMI_PHY_QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x000000d8 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0 0x000000dc + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0 0x000000e0 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0 0x000000e4 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE1 0x000000e8 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE1 0x000000ec + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE1 0x000000f0 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE2 0x000000f4 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL1 0x000000f4 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE2 0x000000f8 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL2 0x000000f8 + +#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE2 0x000000fc + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD4 0x000000fc + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_INITVAL 0x00000100 + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_EN 0x00000104 + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00000108 + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x0000010c + +#define 
REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x00000110 + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x00000114 + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE2 0x00000118 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL1 0x00000118 + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE2 0x0000011c + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL2 0x0000011c + +#define REG_HDMI_PHY_QSERDES_COM_RES_TRIM_CONTROL2 0x00000120 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_CTRL 0x00000124 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAP 0x00000128 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE0 0x0000012c + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE0 0x00000130 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE1 0x00000134 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE1 0x00000138 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE2 0x0000013c + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL1 0x0000013c + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE2 0x00000140 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL2 0x00000140 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER1 0x00000144 + +#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER2 0x00000148 + +#define REG_HDMI_PHY_QSERDES_COM_SAR 0x0000014c + +#define REG_HDMI_PHY_QSERDES_COM_SAR_CLK 0x00000150 + +#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_OUT_STATUS 0x00000154 + +#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_READY_STATUS 0x00000158 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_STATUS 0x0000015c + +#define REG_HDMI_PHY_QSERDES_COM_RESET_SM_STATUS 0x00000160 + +#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CODE_STATUS 0x00000164 + +#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE1_STATUS 0x00000168 + +#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE2_STATUS 0x0000016c + +#define REG_HDMI_PHY_QSERDES_COM_BG_CTRL 0x00000170 + +#define REG_HDMI_PHY_QSERDES_COM_CLK_SELECT 0x00000174 + +#define REG_HDMI_PHY_QSERDES_COM_HSCLK_SEL 0x00000178 + +#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_BINCODE_STATUS 0x0000017c + +#define REG_HDMI_PHY_QSERDES_COM_PLL_ANALOG 0x00000180 + +#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV 0x00000184 + +#define REG_HDMI_PHY_QSERDES_COM_SW_RESET 0x00000188 + +#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_EN 0x0000018c + +#define REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS 0x00000190 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_CONFIG 0x00000194 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RATE_OVERRIDE 0x00000198 + +#define REG_HDMI_PHY_QSERDES_COM_SVS_MODE_CLK_SEL 0x0000019c + +#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS0 0x000001a0 + +#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS1 0x000001a4 + +#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS2 0x000001a8 + +#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS3 0x000001ac + +#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS_SEL 0x000001b0 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC1 0x000001b4 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC2 0x000001b8 + +#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE1 0x000001bc + +#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE2 0x000001c0 + +#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD5 0x000001c4 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_MODE_LANENO 0x00000000 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_INVERT 0x00000004 + +#define REG_HDMI_PHY_QSERDES_TX_LX_CLKBUF_ENABLE 0x00000008 + +#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_ONE 0x0000000c + +#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_TWO 0x00000010 + +#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_THREE 0x00000014 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_EMP_POST1_LVL 0x00000018 + 
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POST2_EMPH 0x0000001c + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_BOOST_LVL_UP_DN 0x00000020 + +#define REG_HDMI_PHY_QSERDES_TX_LX_HP_PD_ENABLES 0x00000024 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_IDLE_LVL_LARGE_AMP 0x00000028 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL 0x0000002c + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL_OFFSET 0x00000030 + +#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_TSYNC_EN 0x00000034 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PRE_STALL_LDO_BOOST_EN 0x00000038 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_BAND 0x0000003c + +#define REG_HDMI_PHY_QSERDES_TX_LX_SLEW_CNTL 0x00000040 + +#define REG_HDMI_PHY_QSERDES_TX_LX_INTERFACE_SELECT 0x00000044 + +#define REG_HDMI_PHY_QSERDES_TX_LX_LPB_EN 0x00000048 + +#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_TX 0x0000004c + +#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_RX 0x00000050 + +#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_OFFSET 0x00000054 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH1 0x00000058 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH2 0x0000005c + +#define REG_HDMI_PHY_QSERDES_TX_LX_SERDES_BYP_EN_OUT 0x00000060 + +#define REG_HDMI_PHY_QSERDES_TX_LX_DEBUG_BUS_SEL 0x00000064 + +#define REG_HDMI_PHY_QSERDES_TX_LX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x00000068 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POL_INV 0x0000006c + +#define REG_HDMI_PHY_QSERDES_TX_LX_PARRATE_REC_DETECT_IDLE_EN 0x00000070 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN1 0x00000074 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN2 0x00000078 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN3 0x0000007c + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN4 0x00000080 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN5 0x00000084 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN6 0x00000088 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN7 0x0000008c + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN8 0x00000090 + +#define REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE 0x00000094 + +#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE 0x00000098 + +#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE_CONFIGURATION 0x0000009c + +#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL1 0x000000a0 + +#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL2 0x000000a4 + +#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL 0x000000a8 + +#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL_2 0x000000ac + +#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED1 0x000000b0 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED2 0x000000b4 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED3 0x000000b8 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED4 0x000000bc + +#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN 0x000000c0 + +#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN_MUXES 0x000000c4 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TRAN_DRVR_EMP_EN 0x000000c8 + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_INTERFACE_MODE 0x000000cc + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_CTRL 0x000000d0 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_ENCODED_OR_DATA 0x000000d4 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND2 0x000000d8 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND2 0x000000dc + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND2 0x000000e0 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND2 0x000000e4 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND0_1 0x000000e8 + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND0_1 0x000000ec + +#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND0_1 0x000000f0 + 
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND0_1 0x000000f4 + +#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL1 0x000000f8 + +#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL2 0x000000fc + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV_CNTL 0x00000100 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_STATUS 0x00000104 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT1 0x00000108 + +#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT2 0x0000010c + +#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV 0x00000110 + #endif /* HDMI_XML */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c index df232e20c13e..a54d3bb5baad 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c @@ -89,7 +89,7 @@ static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock) return NULL; } -int hdmi_audio_update(struct hdmi *hdmi) +int msm_hdmi_audio_update(struct hdmi *hdmi) { struct hdmi_audio *audio = &hdmi->audio; struct hdmi_audio_infoframe *info = &audio->infoframe; @@ -232,7 +232,7 @@ int hdmi_audio_update(struct hdmi *hdmi) return 0; } -int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled, +int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled, uint32_t num_of_channels, uint32_t channel_allocation, uint32_t level_shift, bool down_mix) { @@ -252,10 +252,10 @@ int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled, audio->infoframe.level_shift_value = level_shift; audio->infoframe.downmix_inhibit = down_mix; - return hdmi_audio_update(hdmi); + return msm_hdmi_audio_update(hdmi); } -void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate) +void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate) { struct hdmi_audio *audio; @@ -268,5 +268,5 @@ void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate) return; audio->rate = rate; - hdmi_audio_update(hdmi); + msm_hdmi_audio_update(hdmi); } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 92b69ae8caf9..bacbd5d8df0e 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -23,11 +23,11 @@ struct hdmi_bridge { }; #define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base) -void hdmi_bridge_destroy(struct drm_bridge *bridge) +void msm_hdmi_bridge_destroy(struct drm_bridge *bridge) { } -static void power_on(struct drm_bridge *bridge) +static void msm_hdmi_power_on(struct drm_bridge *bridge) { struct drm_device *dev = bridge->dev; struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); @@ -86,7 +86,7 @@ static void power_off(struct drm_bridge *bridge) } } -static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) +static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge) { struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; @@ -95,51 +95,51 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) DBG("power up"); if (!hdmi->power_on) { - power_on(bridge); + msm_hdmi_phy_resource_enable(phy); + msm_hdmi_power_on(bridge); hdmi->power_on = true; - hdmi_audio_update(hdmi); + msm_hdmi_audio_update(hdmi); } - if (phy) - phy->funcs->powerup(phy, hdmi->pixclock); + msm_hdmi_phy_powerup(phy, hdmi->pixclock); - hdmi_set_mode(hdmi, true); + msm_hdmi_set_mode(hdmi, true); if (hdmi->hdcp_ctrl) - hdmi_hdcp_on(hdmi->hdcp_ctrl); + msm_hdmi_hdcp_on(hdmi->hdcp_ctrl); } -static void hdmi_bridge_enable(struct drm_bridge *bridge) +static void msm_hdmi_bridge_enable(struct drm_bridge 
*bridge) { } -static void hdmi_bridge_disable(struct drm_bridge *bridge) +static void msm_hdmi_bridge_disable(struct drm_bridge *bridge) { } -static void hdmi_bridge_post_disable(struct drm_bridge *bridge) +static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge) { struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; struct hdmi_phy *phy = hdmi->phy; if (hdmi->hdcp_ctrl) - hdmi_hdcp_off(hdmi->hdcp_ctrl); + msm_hdmi_hdcp_off(hdmi->hdcp_ctrl); DBG("power down"); - hdmi_set_mode(hdmi, false); + msm_hdmi_set_mode(hdmi, false); - if (phy) - phy->funcs->powerdown(phy); + msm_hdmi_phy_powerdown(phy); if (hdmi->power_on) { power_off(bridge); hdmi->power_on = false; - hdmi_audio_update(hdmi); + msm_hdmi_audio_update(hdmi); + msm_hdmi_phy_resource_disable(phy); } } -static void hdmi_bridge_mode_set(struct drm_bridge *bridge, +static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { @@ -196,20 +196,20 @@ static void hdmi_bridge_mode_set(struct drm_bridge *bridge, DBG("frame_ctrl=%08x", frame_ctrl); hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl); - hdmi_audio_update(hdmi); + msm_hdmi_audio_update(hdmi); } -static const struct drm_bridge_funcs hdmi_bridge_funcs = { - .pre_enable = hdmi_bridge_pre_enable, - .enable = hdmi_bridge_enable, - .disable = hdmi_bridge_disable, - .post_disable = hdmi_bridge_post_disable, - .mode_set = hdmi_bridge_mode_set, +static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = { + .pre_enable = msm_hdmi_bridge_pre_enable, + .enable = msm_hdmi_bridge_enable, + .disable = msm_hdmi_bridge_disable, + .post_disable = msm_hdmi_bridge_post_disable, + .mode_set = msm_hdmi_bridge_mode_set, }; /* initialize bridge */ -struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi) +struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi) { struct drm_bridge *bridge = NULL; struct hdmi_bridge *hdmi_bridge; @@ -225,7 +225,7 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi) hdmi_bridge->hdmi = hdmi; bridge = &hdmi_bridge->base; - bridge->funcs = &hdmi_bridge_funcs; + bridge->funcs = &msm_hdmi_bridge_funcs; ret = drm_bridge_attach(hdmi->dev, bridge); if (ret) @@ -235,7 +235,7 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi) fail: if (bridge) - hdmi_bridge_destroy(bridge); + msm_hdmi_bridge_destroy(bridge); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index a3b05ae52dae..26129bff2dd6 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -28,7 +28,7 @@ struct hdmi_connector { }; #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base) -static void hdmi_phy_reset(struct hdmi *hdmi) +static void msm_hdmi_phy_reset(struct hdmi *hdmi) { unsigned int val; @@ -81,114 +81,54 @@ static int gpio_config(struct hdmi *hdmi, bool on) { struct device *dev = &hdmi->pdev->dev; const struct hdmi_platform_config *config = hdmi->config; - int ret; + int ret, i; if (on) { - if (config->ddc_clk_gpio != -1) { - ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK"); - if (ret) { - dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_DDC_CLK", config->ddc_clk_gpio, ret); - goto error1; - } - gpio_set_value_cansleep(config->ddc_clk_gpio, 1); - } - - if (config->ddc_data_gpio != -1) { - ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA"); - if (ret) { - dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", - 
"HDMI_DDC_DATA", config->ddc_data_gpio, ret); - goto error2; - } - gpio_set_value_cansleep(config->ddc_data_gpio, 1); - } - - ret = gpio_request(config->hpd_gpio, "HDMI_HPD"); - if (ret) { - dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_HPD", config->hpd_gpio, ret); - goto error3; - } - gpio_direction_input(config->hpd_gpio); - gpio_set_value_cansleep(config->hpd_gpio, 1); - - if (config->mux_en_gpio != -1) { - ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN"); - if (ret) { - dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_MUX_EN", config->mux_en_gpio, ret); - goto error4; - } - gpio_set_value_cansleep(config->mux_en_gpio, 1); - } - - if (config->mux_sel_gpio != -1) { - ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL"); - if (ret) { - dev_err(dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_MUX_SEL", config->mux_sel_gpio, ret); - goto error5; + for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { + struct hdmi_gpio_data gpio = config->gpios[i]; + + if (gpio.num != -1) { + ret = gpio_request(gpio.num, gpio.label); + if (ret) { + dev_err(dev, + "'%s'(%d) gpio_request failed: %d\n", + gpio.label, gpio.num, ret); + goto err; + } + + if (gpio.output) { + gpio_direction_output(gpio.num, + gpio.value); + } else { + gpio_direction_input(gpio.num); + gpio_set_value_cansleep(gpio.num, + gpio.value); + } } - gpio_set_value_cansleep(config->mux_sel_gpio, 0); } - if (config->mux_lpm_gpio != -1) { - ret = gpio_request(config->mux_lpm_gpio, - "HDMI_MUX_LPM"); - if (ret) { - dev_err(dev, - "'%s'(%d) gpio_request failed: %d\n", - "HDMI_MUX_LPM", - config->mux_lpm_gpio, ret); - goto error6; - } - gpio_set_value_cansleep(config->mux_lpm_gpio, 1); - } DBG("gpio on"); } else { - if (config->ddc_clk_gpio != -1) - gpio_free(config->ddc_clk_gpio); - - if (config->ddc_data_gpio != -1) - gpio_free(config->ddc_data_gpio); + for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { + struct hdmi_gpio_data gpio = config->gpios[i]; - gpio_free(config->hpd_gpio); + if (gpio.output) { + int value = gpio.value ? 
0 : 1; - if (config->mux_en_gpio != -1) { - gpio_set_value_cansleep(config->mux_en_gpio, 0); - gpio_free(config->mux_en_gpio); - } + gpio_set_value_cansleep(gpio.num, value); + } - if (config->mux_sel_gpio != -1) { - gpio_set_value_cansleep(config->mux_sel_gpio, 1); - gpio_free(config->mux_sel_gpio); - } + gpio_free(gpio.num); + }; - if (config->mux_lpm_gpio != -1) { - gpio_set_value_cansleep(config->mux_lpm_gpio, 0); - gpio_free(config->mux_lpm_gpio); - } DBG("gpio off"); } return 0; +err: + while (i--) + gpio_free(config->gpios[i].num); -error6: - if (config->mux_sel_gpio != -1) - gpio_free(config->mux_sel_gpio); -error5: - if (config->mux_en_gpio != -1) - gpio_free(config->mux_en_gpio); -error4: - gpio_free(config->hpd_gpio); -error3: - if (config->ddc_data_gpio != -1) - gpio_free(config->ddc_data_gpio); -error2: - if (config->ddc_clk_gpio != -1) - gpio_free(config->ddc_clk_gpio); -error1: return ret; } @@ -239,9 +179,9 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) } } - hdmi_set_mode(hdmi, false); - hdmi_phy_reset(hdmi); - hdmi_set_mode(hdmi, true); + msm_hdmi_set_mode(hdmi, false); + msm_hdmi_phy_reset(hdmi); + msm_hdmi_set_mode(hdmi, true); hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b); @@ -278,7 +218,7 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector) /* Disable HPD interrupt */ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0); - hdmi_set_mode(hdmi, false); + msm_hdmi_set_mode(hdmi, false); for (i = 0; i < config->hpd_clk_cnt; i++) clk_disable_unprepare(hdmi->hpd_clks[i]); @@ -300,7 +240,7 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector) } static void -hotplug_work(struct work_struct *work) +msm_hdmi_hotplug_work(struct work_struct *work) { struct hdmi_connector *hdmi_connector = container_of(work, struct hdmi_connector, hpd_work); @@ -308,7 +248,7 @@ hotplug_work(struct work_struct *work) drm_helper_hpd_irq_event(connector->dev); } -void hdmi_connector_irq(struct drm_connector *connector) +void msm_hdmi_connector_irq(struct drm_connector *connector) { struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); struct hdmi *hdmi = hdmi_connector->hdmi; @@ -345,10 +285,13 @@ static enum drm_connector_status detect_reg(struct hdmi *hdmi) connector_status_connected : connector_status_disconnected; } +#define HPD_GPIO_INDEX 2 static enum drm_connector_status detect_gpio(struct hdmi *hdmi) { const struct hdmi_platform_config *config = hdmi->config; - return gpio_get_value(config->hpd_gpio) ? + struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX]; + + return gpio_get_value(hpd_gpio.num) ? connector_status_connected : connector_status_disconnected; } @@ -358,9 +301,18 @@ static enum drm_connector_status hdmi_connector_detect( { struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); struct hdmi *hdmi = hdmi_connector->hdmi; + const struct hdmi_platform_config *config = hdmi->config; + struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX]; enum drm_connector_status stat_gpio, stat_reg; int retry = 20; + /* + * some platforms may not have hpd gpio. Rely only on the status + * provided by REG_HDMI_HPD_INT_STATUS in this case. 
+ */ + if (hpd_gpio.num == -1) + return detect_reg(hdmi); + do { stat_gpio = detect_gpio(hdmi); stat_reg = detect_reg(hdmi); @@ -395,7 +347,7 @@ static void hdmi_connector_destroy(struct drm_connector *connector) kfree(hdmi_connector); } -static int hdmi_connector_get_modes(struct drm_connector *connector) +static int msm_hdmi_connector_get_modes(struct drm_connector *connector) { struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); struct hdmi *hdmi = hdmi_connector->hdmi; @@ -421,7 +373,7 @@ static int hdmi_connector_get_modes(struct drm_connector *connector) return ret; } -static int hdmi_connector_mode_valid(struct drm_connector *connector, +static int msm_hdmi_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); @@ -451,7 +403,7 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector, } static struct drm_encoder * -hdmi_connector_best_encoder(struct drm_connector *connector) +msm_hdmi_connector_best_encoder(struct drm_connector *connector) { struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); return hdmi_connector->hdmi->encoder; @@ -467,14 +419,14 @@ static const struct drm_connector_funcs hdmi_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { - .get_modes = hdmi_connector_get_modes, - .mode_valid = hdmi_connector_mode_valid, - .best_encoder = hdmi_connector_best_encoder, +static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = { + .get_modes = msm_hdmi_connector_get_modes, + .mode_valid = msm_hdmi_connector_mode_valid, + .best_encoder = msm_hdmi_connector_best_encoder, }; /* initialize connector */ -struct drm_connector *hdmi_connector_init(struct hdmi *hdmi) +struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi) { struct drm_connector *connector = NULL; struct hdmi_connector *hdmi_connector; @@ -487,13 +439,13 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi) } hdmi_connector->hdmi = hdmi; - INIT_WORK(&hdmi_connector->hpd_work, hotplug_work); + INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work); connector = &hdmi_connector->base; drm_connector_init(hdmi->dev, connector, &hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); - drm_connector_helper_add(connector, &hdmi_connector_helper_funcs); + drm_connector_helper_add(connector, &msm_hdmi_connector_helper_funcs); connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c index 1dc9c34eb0df..0baaaaabd002 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c @@ -84,7 +84,7 @@ struct hdmi_hdcp_ctrl { bool max_dev_exceeded; }; -static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset, +static int msm_hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset, u8 *data, u16 data_len) { int rc; @@ -122,7 +122,7 @@ retry: #define HDCP_DDC_WRITE_MAX_BYTE_NUM 32 -static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset, +static int msm_hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset, u8 *data, u16 data_len) { int rc; @@ -162,7 +162,7 @@ retry: return rc; } -static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg, +static int msm_hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg, u32 *pdata, u32 count) { struct hdmi *hdmi = 
hdcp_ctrl->hdmi; @@ -202,7 +202,7 @@ static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg, return ret; } -void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl) +void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl) { struct hdmi *hdmi = hdcp_ctrl->hdmi; u32 reg_val, hdcp_int_status; @@ -247,7 +247,7 @@ void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl) } } -static int hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev) +static int msm_hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev) { int rc; @@ -264,7 +264,7 @@ static int hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev) return 0; } -static int hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl) { struct hdmi *hdmi = hdcp_ctrl->hdmi; @@ -287,7 +287,7 @@ static int hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl) return 0; } -static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl) { struct hdmi *hdmi = hdcp_ctrl->hdmi; u32 reg_val, failure, nack0; @@ -337,7 +337,7 @@ static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl) reg_val |= HDMI_DDC_CTRL_SW_STATUS_RESET; hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val); - rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL); reg_val &= ~HDMI_DDC_CTRL_SW_STATUS_RESET; @@ -350,7 +350,7 @@ static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl) /* If previous msleep is aborted, skip this msleep */ if (!rc) - rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL); reg_val &= ~HDMI_DDC_CTRL_SOFT_RESET; @@ -362,7 +362,7 @@ static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl) return rc; } -static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl) { int rc; u32 hdcp_ddc_status, ddc_hw_status; @@ -394,7 +394,7 @@ static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl) return -ETIMEDOUT; } - rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); if (rc) return rc; } while (1); @@ -402,7 +402,7 @@ static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl) return 0; } -static void hdmi_hdcp_reauth_work(struct work_struct *work) +static void msm_hdmi_hdcp_reauth_work(struct work_struct *work) { struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work, struct hdmi_hdcp_ctrl, hdcp_reauth_work); @@ -430,7 +430,7 @@ static void hdmi_hdcp_reauth_work(struct work_struct *work) HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE); /* Wait to be clean on DDC HW engine */ - if (hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) { + if (msm_hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) { pr_info("%s: reauth work aborted\n", __func__); return; } @@ -461,7 +461,7 @@ static void hdmi_hdcp_reauth_work(struct work_struct *work) queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work); } -static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl) { struct hdmi *hdmi = hdcp_ctrl->hdmi; u32 link0_status; @@ -470,7 +470,7 @@ static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl) int rc; if (!hdcp_ctrl->aksv_valid) { - rc = 
hdmi_hdcp_read_validate_aksv(hdcp_ctrl); + rc = msm_hdmi_hdcp_read_validate_aksv(hdcp_ctrl); if (rc) { pr_err("%s: ASKV validation failed\n", __func__); hdcp_ctrl->hdcp_state = HDCP_STATE_NO_AKSV; @@ -538,12 +538,12 @@ static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl) DBG("An not ready after enabling HDCP"); /* Clear any DDC failures from previous tries before enable HDCP*/ - rc = reset_hdcp_ddc_failures(hdcp_ctrl); + rc = msm_reset_hdcp_ddc_failures(hdcp_ctrl); return rc; } -static void hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static void msm_hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl) { struct hdmi *hdmi = hdcp_ctrl->hdmi; u32 reg_val; @@ -561,7 +561,7 @@ static void hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl) queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work); } -static void hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static void msm_hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl) { struct hdmi *hdmi = hdcp_ctrl->hdmi; u32 reg_val; @@ -596,7 +596,7 @@ static void hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl) * Write An and AKSV to sink * Read BKSV from sink and write into HDCP engine */ -static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl) { int rc; struct hdmi *hdmi = hdcp_ctrl->hdmi; @@ -621,7 +621,7 @@ static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl) return -ETIMEDOUT; } - rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); if (rc) return rc; } while (1); @@ -643,7 +643,7 @@ static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl) return -ETIMEDOUT; } - rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); if (rc) return rc; } while (1); @@ -651,7 +651,7 @@ static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl) return 0; } -static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl) { int rc = 0; struct hdmi *hdmi = hdcp_ctrl->hdmi; @@ -676,7 +676,7 @@ static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl) aksv[4] = link0_aksv_1 & 0xFF; /* Write An to offset 0x18 */ - rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an, + rc = msm_hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an, (u16)sizeof(link0_an)); if (rc) { pr_err("%s:An write failed\n", __func__); @@ -685,7 +685,7 @@ static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl) DBG("Link0-An=%08x%08x", link0_an[0], link0_an[1]); /* Write AKSV to offset 0x10 */ - rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5); + rc = msm_hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5); if (rc) { pr_err("%s:AKSV write failed\n", __func__); return rc; @@ -695,7 +695,7 @@ static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl) return 0; } -static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl) { int rc = 0; struct hdmi *hdmi = hdcp_ctrl->hdmi; @@ -703,7 +703,7 @@ static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl) u32 reg[2], data[2]; /* Read BKSV at offset 0x00 */ - rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5); + rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5); if (rc) { pr_err("%s:BKSV read failed\n", __func__); return rc; @@ -728,19 +728,19 
@@ static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl) data[0] = hdcp_ctrl->bksv_lsb; reg[1] = REG_HDMI_HDCP_RCVPORT_DATA1; data[1] = hdcp_ctrl->bksv_msb; - rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2); + rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2); return rc; } -static int hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl) { int rc = 0; struct hdmi *hdmi = hdcp_ctrl->hdmi; u32 reg, data; u8 bcaps; - rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1); + rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1); if (rc) { pr_err("%s:BCAPS read failed\n", __func__); return rc; @@ -753,26 +753,26 @@ static int hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl) /* Write BCAPS to the hardware */ reg = REG_HDMI_HDCP_RCVPORT_DATA12; data = (u32)bcaps; - rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1); + rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1); return rc; } -static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl) { struct hdmi *hdmi = hdcp_ctrl->hdmi; unsigned long flags; int rc; /* Wait for AKSV key and An ready */ - rc = hdmi_hdcp_wait_key_an_ready(hdcp_ctrl); + rc = msm_hdmi_hdcp_wait_key_an_ready(hdcp_ctrl); if (rc) { pr_err("%s: wait key and an ready failed\n", __func__); return rc; }; /* Read BCAPS and send to HDCP engine */ - rc = hdmi_hdcp_recv_bcaps(hdcp_ctrl); + rc = msm_hdmi_hdcp_recv_bcaps(hdcp_ctrl); if (rc) { pr_err("%s: read bcaps error, abort\n", __func__); return rc; @@ -785,14 +785,14 @@ static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl) hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4, 0); /* Send AKSV and An to sink */ - rc = hdmi_hdcp_send_aksv_an(hdcp_ctrl); + rc = msm_hdmi_hdcp_send_aksv_an(hdcp_ctrl); if (rc) { pr_err("%s:An/Aksv write failed\n", __func__); return rc; } /* Read BKSV and send to HDCP engine*/ - rc = hdmi_hdcp_recv_bksv(hdcp_ctrl); + rc = msm_hdmi_hdcp_recv_bksv(hdcp_ctrl); if (rc) { pr_err("%s:BKSV Process failed\n", __func__); return rc; @@ -812,7 +812,7 @@ static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl) } /* read R0' from sink and pass it to HDCP engine */ -static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl) { struct hdmi *hdmi = hdcp_ctrl->hdmi; int rc = 0; @@ -822,12 +822,12 @@ static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl) * HDCP Compliance Test case 1A-01: * Wait here at least 100ms before reading R0' */ - rc = hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV); + rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV); if (rc) return rc; /* Read R0' at offset 0x08 */ - rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2); + rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2); if (rc) { pr_err("%s:R0' read failed\n", __func__); return rc; @@ -842,14 +842,14 @@ static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl) } /* Wait for authenticating result: R0/R0' are matched or not */ -static int hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl) { struct hdmi *hdmi = hdcp_ctrl->hdmi; u32 link0_status; int rc; /* wait for hdcp irq, 10 sec should be long enough */ - rc = hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV); + rc = 
msm_hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV); if (!rc) { pr_err("%s: Wait Auth IRQ timeout\n", __func__); return -ETIMEDOUT; @@ -869,7 +869,7 @@ static int hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl) return 0; } -static int hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl, +static int msm_hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl, u16 *pbstatus) { int rc; @@ -880,7 +880,7 @@ static int hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl, u8 buf[2]; /* Read BSTATUS at offset 0x41 */ - rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2); + rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2); if (rc) { pr_err("%s: BSTATUS read failed\n", __func__); goto error; @@ -936,7 +936,7 @@ error: return rc; } -static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready( +static int msm_hdmi_hdcp_auth_part2_wait_ksv_fifo_ready( struct hdmi_hdcp_ctrl *hdcp_ctrl) { int rc; @@ -953,7 +953,7 @@ static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready( timeout_count = 100; do { /* Read BCAPS at offset 0x40 */ - rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1); + rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1); if (rc) { pr_err("%s: BCAPS read failed\n", __func__); return rc; @@ -968,12 +968,12 @@ static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready( return -ETIMEDOUT; } - rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); if (rc) return rc; } while (1); - rc = hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus); + rc = msm_hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus); if (rc) { pr_err("%s: bstatus error\n", __func__); return rc; @@ -982,7 +982,7 @@ static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready( /* Write BSTATUS and BCAPS to HDCP registers */ reg = REG_HDMI_HDCP_RCVPORT_DATA12; data = bcaps | (bstatus << 8); - rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1); + rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1); if (rc) { pr_err("%s: BSTATUS write failed\n", __func__); return rc; @@ -997,7 +997,7 @@ static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready( * transfer V' from sink to HDCP engine * reset SHA engine */ -static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl) { struct hdmi *hdmi = hdcp_ctrl->hdmi; int rc = 0; @@ -1016,7 +1016,7 @@ static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl) for (i = 0; i < size; i++) { rd = &reg_data[i]; - rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, + rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, rd->off, (u8 *)&data[i], (u16)sizeof(data[i])); if (rc) { pr_err("%s: Read %s failed\n", __func__, rd->name); @@ -1027,13 +1027,13 @@ static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl) reg[i] = reg_data[i].reg_id; } - rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size); + rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size); error: return rc; } -static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl) { int rc; struct hdmi *hdmi = hdcp_ctrl->hdmi; @@ -1041,7 +1041,7 @@ static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl) ksv_bytes = 5 * hdcp_ctrl->dev_count; - rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43, + rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43, hdcp_ctrl->ksv_list, ksv_bytes); if (rc) pr_err("%s: KSV FIFO read failed\n", __func__); @@ -1049,7 +1049,7 @@ static int hdmi_hdcp_recv_ksv_fifo(struct 
hdmi_hdcp_ctrl *hdcp_ctrl) return rc; } -static int hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl) { u32 reg[2], data[2]; u32 rc = 0; @@ -1059,12 +1059,12 @@ static int hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl) reg[1] = REG_HDMI_HDCP_SHA_CTRL; data[1] = HDCP_REG_DISABLE; - rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2); + rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2); return rc; } -static int hdmi_hdcp_auth_part2_recv_ksv_fifo( +static int msm_hdmi_hdcp_auth_part2_recv_ksv_fifo( struct hdmi_hdcp_ctrl *hdcp_ctrl) { int rc; @@ -1081,7 +1081,7 @@ static int hdmi_hdcp_auth_part2_recv_ksv_fifo( */ timeout_count = 100; do { - rc = hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl); + rc = msm_hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl); if (!rc) break; @@ -1091,19 +1091,19 @@ static int hdmi_hdcp_auth_part2_recv_ksv_fifo( return -ETIMEDOUT; } - rc = hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV); + rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV); if (rc) return rc; } while (1); - rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl); + rc = msm_hdmi_hdcp_transfer_v_h(hdcp_ctrl); if (rc) { pr_err("%s: transfer V failed\n", __func__); return rc; } /* reset SHA engine before write ksv fifo */ - rc = hdmi_hdcp_reset_sha_engine(hdcp_ctrl); + rc = msm_hdmi_hdcp_reset_sha_engine(hdcp_ctrl); if (rc) { pr_err("%s: fail to reset sha engine\n", __func__); return rc; @@ -1120,7 +1120,7 @@ static int hdmi_hdcp_auth_part2_recv_ksv_fifo( * If the last byte is written, we need to poll for * HDCP_SHA_COMP_DONE to wait until HW finish */ -static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl) { int i; struct hdmi *hdmi = hdcp_ctrl->hdmi; @@ -1169,7 +1169,7 @@ static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl) reg = REG_HDMI_HDCP_SHA_DATA; data = reg_val; - rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1); + rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1); if (rc) return rc; @@ -1184,7 +1184,7 @@ static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl) } /* write ksv fifo into HDCP engine */ -static int hdmi_hdcp_auth_part2_write_ksv_fifo( +static int msm_hdmi_hdcp_auth_part2_write_ksv_fifo( struct hdmi_hdcp_ctrl *hdcp_ctrl) { int rc; @@ -1193,7 +1193,7 @@ static int hdmi_hdcp_auth_part2_write_ksv_fifo( hdcp_ctrl->ksv_fifo_w_index = 0; timeout_count = 100; do { - rc = hdmi_hdcp_write_ksv_fifo(hdcp_ctrl); + rc = msm_hdmi_hdcp_write_ksv_fifo(hdcp_ctrl); if (!rc) break; @@ -1206,7 +1206,7 @@ static int hdmi_hdcp_auth_part2_write_ksv_fifo( return -ETIMEDOUT; } - rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); if (rc) return rc; } while (1); @@ -1214,7 +1214,7 @@ static int hdmi_hdcp_auth_part2_write_ksv_fifo( return 0; } -static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl) +static int msm_hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl) { int rc = 0; struct hdmi *hdmi = hdcp_ctrl->hdmi; @@ -1232,7 +1232,7 @@ static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl) return -ETIMEDOUT; } - rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); + rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV); if (rc) return rc; } while (1); @@ -1240,32 +1240,32 @@ -static void hdmi_hdcp_auth_work(struct 
work_struct *work) +static void msm_hdmi_hdcp_auth_work(struct work_struct *work) { struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work, struct hdmi_hdcp_ctrl, hdcp_auth_work); int rc; - rc = hdmi_hdcp_auth_prepare(hdcp_ctrl); + rc = msm_hdmi_hdcp_auth_prepare(hdcp_ctrl); if (rc) { pr_err("%s: auth prepare failed %d\n", __func__, rc); goto end; } /* HDCP PartI */ - rc = hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl); + rc = msm_hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl); if (rc) { pr_err("%s: key exchange failed %d\n", __func__, rc); goto end; } - rc = hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl); + rc = msm_hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl); if (rc) { pr_err("%s: receive r0 failed %d\n", __func__, rc); goto end; } - rc = hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl); + rc = msm_hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl); if (rc) { pr_err("%s: verify r0 failed %d\n", __func__, rc); goto end; @@ -1275,25 +1275,25 @@ static void hdmi_hdcp_auth_work(struct work_struct *work) goto end; /* HDCP PartII */ - rc = hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl); + rc = msm_hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl); if (rc) { pr_err("%s: wait ksv fifo ready failed %d\n", __func__, rc); goto end; } - rc = hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl); + rc = msm_hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl); if (rc) { pr_err("%s: recv ksv fifo failed %d\n", __func__, rc); goto end; } - rc = hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl); + rc = msm_hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl); if (rc) { pr_err("%s: write ksv fifo failed %d\n", __func__, rc); goto end; } - rc = hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl); + rc = msm_hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl); if (rc) pr_err("%s: check v match failed %d\n", __func__, rc); @@ -1304,13 +1304,13 @@ end: pr_info("%s: hdcp is not supported\n", __func__); } else if (rc) { pr_err("%s: hdcp authentication failed\n", __func__); - hdmi_hdcp_auth_fail(hdcp_ctrl); + msm_hdmi_hdcp_auth_fail(hdcp_ctrl); } else { - hdmi_hdcp_auth_done(hdcp_ctrl); + msm_hdmi_hdcp_auth_done(hdcp_ctrl); } } -void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl) +void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl) { struct hdmi *hdmi = hdcp_ctrl->hdmi; u32 reg_val; @@ -1335,7 +1335,7 @@ void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl) queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work); } -void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl) +void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl) { struct hdmi *hdmi = hdcp_ctrl->hdmi; unsigned long flags; @@ -1399,7 +1399,7 @@ void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl) DBG("HDCP: Off"); } -struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi) +struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi) { struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL; @@ -1413,8 +1413,8 @@ struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi) if (!hdcp_ctrl) return ERR_PTR(-ENOMEM); - INIT_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work); - INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, hdmi_hdcp_reauth_work); + INIT_WORK(&hdcp_ctrl->hdcp_auth_work, msm_hdmi_hdcp_auth_work); + INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, msm_hdmi_hdcp_reauth_work); init_waitqueue_head(&hdcp_ctrl->auth_event_queue); hdcp_ctrl->hdmi = hdmi; hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE; @@ -1428,7 +1428,7 @@ struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi) return hdcp_ctrl; } -void hdmi_hdcp_destroy(struct hdmi *hdmi) +void msm_hdmi_hdcp_destroy(struct hdmi *hdmi) { if (hdmi && hdmi->hdcp_ctrl) { kfree(hdmi->hdcp_ctrl); 
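Note on the hdmi_hdcp.c hunks above: the renamed msm_hdmi_hdcp_msleep() helper, called between every authentication step, is an abortable delay rather than a plain msleep(): the authentication work sleeps on a wait queue so that an abort or result-ready event posted from the interrupt or teardown path wakes it early instead of blocking for the full timeout. The helper's body is not part of these hunks, so the following is only a sketch of that wait_event_timeout() pattern; the names auth_ctx, pending and abortable_msleep() are illustrative and not taken from the driver.

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <linux/wait.h>

struct auth_ctx {
	wait_queue_head_t evq;	/* woken from IRQ/teardown paths */
	u32 pending;		/* bitmask of posted events */
};

/* Sleep up to @ms milliseconds, but return early (consuming the event)
 * if any bit in @ev is posted to @ctx->pending in the meantime. */
static int abortable_msleep(struct auth_ctx *ctx, u32 ms, u32 ev)
{
	long left;

	left = wait_event_timeout(ctx->evq, ctx->pending & ev,
				  msecs_to_jiffies(ms));
	if (left) {
		pr_info("auth sleep cut short by event 0x%x\n", ev);
		ctx->pending &= ~ev;
		return -ECANCELED;
	}

	return 0;
}

A caller treats a non-zero return as "authentication aborted, stop here", which is why almost every step in the hunks above checks the helper's return code before moving on.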
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c index f4ab7f70fed1..de9007e72f4e 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c @@ -97,7 +97,7 @@ static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c) return hdmi_i2c->sw_done; } -static int hdmi_i2c_xfer(struct i2c_adapter *i2c, +static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs, int num) { struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c); @@ -216,17 +216,17 @@ static int hdmi_i2c_xfer(struct i2c_adapter *i2c, return i; } -static u32 hdmi_i2c_func(struct i2c_adapter *adapter) +static u32 msm_hdmi_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static const struct i2c_algorithm hdmi_i2c_algorithm = { - .master_xfer = hdmi_i2c_xfer, - .functionality = hdmi_i2c_func, +static const struct i2c_algorithm msm_hdmi_i2c_algorithm = { + .master_xfer = msm_hdmi_i2c_xfer, + .functionality = msm_hdmi_i2c_func, }; -void hdmi_i2c_irq(struct i2c_adapter *i2c) +void msm_hdmi_i2c_irq(struct i2c_adapter *i2c) { struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c); @@ -234,14 +234,14 @@ void hdmi_i2c_irq(struct i2c_adapter *i2c) wake_up_all(&hdmi_i2c->ddc_event); } -void hdmi_i2c_destroy(struct i2c_adapter *i2c) +void msm_hdmi_i2c_destroy(struct i2c_adapter *i2c) { struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c); i2c_del_adapter(i2c); kfree(hdmi_i2c); } -struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi) +struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi) { struct drm_device *dev = hdmi->dev; struct hdmi_i2c_adapter *hdmi_i2c; @@ -264,7 +264,7 @@ struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi) i2c->class = I2C_CLASS_DDC; snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c"); i2c->dev.parent = &hdmi->pdev->dev; - i2c->algo = &hdmi_i2c_algorithm; + i2c->algo = &msm_hdmi_i2c_algorithm; ret = i2c_add_adapter(i2c); if (ret) { @@ -276,6 +276,6 @@ struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi) fail: if (i2c) - hdmi_i2c_destroy(i2c); + msm_hdmi_i2c_destroy(i2c); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c new file mode 100644 index 000000000000..534ce5b49781 --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/of_device.h> + +#include "hdmi.h" + +static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy) +{ + struct hdmi_phy_cfg *cfg = phy->cfg; + struct device *dev = &phy->pdev->dev; + int i, ret; + + phy->regs = devm_kzalloc(dev, sizeof(phy->regs[0]) * cfg->num_regs, + GFP_KERNEL); + if (!phy->regs) + return -ENOMEM; + + phy->clks = devm_kzalloc(dev, sizeof(phy->clks[0]) * cfg->num_clks, + GFP_KERNEL); + if (!phy->clks) + return -ENOMEM; + + for (i = 0; i < cfg->num_regs; i++) { + struct regulator *reg; + + reg = devm_regulator_get(dev, cfg->reg_names[i]); + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + dev_err(dev, "failed to get phy regulator: %s (%d)\n", + cfg->reg_names[i], ret); + return ret; + } + + phy->regs[i] = reg; + } + + for (i = 0; i < cfg->num_clks; i++) { + struct clk *clk; + + clk = devm_clk_get(dev, cfg->clk_names[i]); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(dev, "failed to get phy clock: %s (%d)\n", + cfg->clk_names[i], ret); + return ret; + } + + phy->clks[i] = clk; + } + + return 0; +} + +int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy) +{ + struct hdmi_phy_cfg *cfg = phy->cfg; + struct device *dev = &phy->pdev->dev; + int i, ret = 0; + + pm_runtime_get_sync(dev); + + for (i = 0; i < cfg->num_regs; i++) { + ret = regulator_enable(phy->regs[i]); + if (ret) + dev_err(dev, "failed to enable regulator: %s (%d)\n", + cfg->reg_names[i], ret); + } + + for (i = 0; i < cfg->num_clks; i++) { + ret = clk_prepare_enable(phy->clks[i]); + if (ret) + dev_err(dev, "failed to enable clock: %s (%d)\n", + cfg->clk_names[i], ret); + } + + return ret; +} + +void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy) +{ + struct hdmi_phy_cfg *cfg = phy->cfg; + struct device *dev = &phy->pdev->dev; + int i; + + for (i = cfg->num_clks - 1; i >= 0; i--) + clk_disable_unprepare(phy->clks[i]); + + for (i = cfg->num_regs - 1; i >= 0; i--) + regulator_disable(phy->regs[i]); + + pm_runtime_put_sync(dev); +} + +void msm_hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock) +{ + if (!phy || !phy->cfg->powerup) + return; + + phy->cfg->powerup(phy, pixclock); +} + +void msm_hdmi_phy_powerdown(struct hdmi_phy *phy) +{ + if (!phy || !phy->cfg->powerdown) + return; + + phy->cfg->powerdown(phy); +} + +static int msm_hdmi_phy_pll_init(struct platform_device *pdev, + enum hdmi_phy_type type) +{ + int ret; + + switch (type) { + case MSM_HDMI_PHY_8960: + ret = msm_hdmi_pll_8960_init(pdev); + break; + case MSM_HDMI_PHY_8996: + ret = msm_hdmi_pll_8996_init(pdev); + break; + /* + * we don't have PLL support for these, don't report an error for now + */ + case MSM_HDMI_PHY_8x60: + case MSM_HDMI_PHY_8x74: + default: + ret = 0; + break; + } + + return ret; +} + +static int msm_hdmi_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hdmi_phy *phy; + int ret; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENODEV; + + phy->cfg = (struct hdmi_phy_cfg *)of_device_get_match_data(dev); + if (!phy->cfg) + return -ENODEV; + + phy->mmio = msm_ioremap(pdev, "hdmi_phy", "HDMI_PHY"); + if (IS_ERR(phy->mmio)) { + dev_err(dev, "%s: failed to map phy base\n", __func__); + return -ENOMEM; + } + + phy->pdev = pdev; + + ret = msm_hdmi_phy_resource_init(phy); + if (ret) + return ret; + + pm_runtime_enable(&pdev->dev); + + ret = msm_hdmi_phy_resource_enable(phy); + if (ret) + return ret; + + ret = msm_hdmi_phy_pll_init(pdev, phy->cfg->type); + if (ret) { + dev_err(dev, "couldn't init PLL\n"); + 
msm_hdmi_phy_resource_disable(phy); + return ret; + } + + msm_hdmi_phy_resource_disable(phy); + + platform_set_drvdata(pdev, phy); + + return 0; +} + +static int msm_hdmi_phy_remove(struct platform_device *pdev) +{ + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct of_device_id msm_hdmi_phy_dt_match[] = { + { .compatible = "qcom,hdmi-phy-8660", + .data = &msm_hdmi_phy_8x60_cfg }, + { .compatible = "qcom,hdmi-phy-8960", + .data = &msm_hdmi_phy_8960_cfg }, + { .compatible = "qcom,hdmi-phy-8974", + .data = &msm_hdmi_phy_8x74_cfg }, + { .compatible = "qcom,hdmi-phy-8084", + .data = &msm_hdmi_phy_8x74_cfg }, + { .compatible = "qcom,hdmi-phy-8996", + .data = &msm_hdmi_phy_8996_cfg }, + {} +}; + +static struct platform_driver msm_hdmi_phy_platform_driver = { + .probe = msm_hdmi_phy_probe, + .remove = msm_hdmi_phy_remove, + .driver = { + .name = "msm_hdmi_phy", + .of_match_table = msm_hdmi_phy_dt_match, + }, +}; + +void __init msm_hdmi_phy_driver_register(void) +{ + platform_driver_register(&msm_hdmi_phy_platform_driver); +} + +void __exit msm_hdmi_phy_driver_unregister(void) +{ + platform_driver_unregister(&msm_hdmi_phy_platform_driver); +} diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c index 3a01cb5051e2..e6ee6b745ab7 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c @@ -15,495 +15,48 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ -#ifdef CONFIG_COMMON_CLK -#include <linux/clk.h> -#include <linux/clk-provider.h> -#endif - #include "hdmi.h" -struct hdmi_phy_8960 { - struct hdmi_phy base; - struct hdmi *hdmi; -#ifdef CONFIG_COMMON_CLK - struct clk_hw pll_hw; - struct clk *pll; - unsigned long pixclk; -#endif -}; -#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base) - -#ifdef CONFIG_COMMON_CLK -#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw) - -/* - * HDMI PLL: - * - * To get the parent clock setup properly, we need to plug in hdmi pll - * configuration into common-clock-framework. 
- */ - -struct pll_rate { - unsigned long rate; - struct { - uint32_t val; - uint32_t reg; - } conf[32]; -}; - -/* NOTE: keep sorted highest freq to lowest: */ -static const struct pll_rate freqtbl[] = { - { 154000000, { - { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0, 0 } } - }, - /* 1080p60/1080p50 case */ - { 148500000, { - { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, - { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, - { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, - { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, - { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, - { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, - { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, - { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, - { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, - { 0, 0 } } - }, - { 108000000, { - { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0, 0 } } - }, - /* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */ - { 74250000, { - { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, - { 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0, 0 } } - }, - { 74176000, { - { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, 
- { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0, 0 } } - }, - { 65000000, { - { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0, 0 } } - }, - /* 480p60/480i60 */ - { 27030000, { - { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, - { 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, - { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, - { 0, 0 } } - }, - /* 576p50/576i50 */ - { 27000000, { - { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, - { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, - { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, - { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, - { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, - { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, - { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, - { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, - { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, - { 0, 0 } } - }, - /* 640x480p60 */ - { 25200000, { - { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, - { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, - { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, - { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, - { 0x2c, 
REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, - { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, - { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, - { 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, - { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, - { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, - { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, - { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, - { 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, - { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, - { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, - { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, - { 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, - { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, - { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, - { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, - { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, - { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, - { 0, 0 } } - }, -}; - -static int hdmi_pll_enable(struct clk_hw *hw) -{ - struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw); - struct hdmi *hdmi = phy_8960->hdmi; - int timeout_count, pll_lock_retry = 10; - unsigned int val; - - DBG(""); - - /* Assert PLL S/W reset */ - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d); - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10); - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a); - - /* Wait for a short time before de-asserting - * to allow the hardware to complete its job. - * This much of delay should be fine for hardware - * to assert and de-assert. - */ - udelay(10); - - /* De-assert PLL S/W reset */ - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d); - - val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12); - val |= HDMI_8960_PHY_REG12_SW_RESET; - /* Assert PHY S/W reset */ - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val); - val &= ~HDMI_8960_PHY_REG12_SW_RESET; - /* Wait for a short time before de-asserting - to allow the hardware to complete its job. - This much of delay should be fine for hardware - to assert and de-assert. */ - udelay(10); - /* De-assert PHY S/W reset */ - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x3f); - - val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12); - val |= HDMI_8960_PHY_REG12_PWRDN_B; - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val); - /* Wait 10 us for enabling global power for PHY */ - mb(); - udelay(10); - - val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B); - val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B; - val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL; - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x80); - - timeout_count = 1000; - while (--pll_lock_retry > 0) { - - /* are we there yet? */ - val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_STATUS0); - if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK) - break; - - udelay(1); - - if (--timeout_count > 0) - continue; - - /* - * PLL has still not locked. 
- * Do a software reset and try again - * Assert PLL S/W reset first - */ - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d); - udelay(10); - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d); - - /* - * Wait for a short duration for the PLL calibration - * before checking if the PLL gets locked - */ - udelay(350); - - timeout_count = 1000; - } - - return 0; -} - -static void hdmi_pll_disable(struct clk_hw *hw) -{ - struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw); - struct hdmi *hdmi = phy_8960->hdmi; - unsigned int val; - - DBG(""); - - val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12); - val &= ~HDMI_8960_PHY_REG12_PWRDN_B; - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val); - - val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B); - val |= HDMI_8960_PHY_REG12_SW_RESET; - val &= ~HDMI_8960_PHY_REG12_PWRDN_B; - hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val); - /* Make sure HDMI PHY/PLL are powered down */ - mb(); -} - -static const struct pll_rate *find_rate(unsigned long rate) -{ - int i; - for (i = 1; i < ARRAY_SIZE(freqtbl); i++) - if (rate > freqtbl[i].rate) - return &freqtbl[i-1]; - return &freqtbl[i-1]; -} - -static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw); - return phy_8960->pixclk; -} - -static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) -{ - const struct pll_rate *pll_rate = find_rate(rate); - return pll_rate->rate; -} - -static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw); - struct hdmi *hdmi = phy_8960->hdmi; - const struct pll_rate *pll_rate = find_rate(rate); - int i; - - DBG("rate=%lu", rate); - - for (i = 0; pll_rate->conf[i].reg; i++) - hdmi_write(hdmi, pll_rate->conf[i].reg, pll_rate->conf[i].val); - - phy_8960->pixclk = rate; - - return 0; -} - - -static const struct clk_ops hdmi_pll_ops = { - .enable = hdmi_pll_enable, - .disable = hdmi_pll_disable, - .recalc_rate = hdmi_pll_recalc_rate, - .round_rate = hdmi_pll_round_rate, - .set_rate = hdmi_pll_set_rate, -}; - -static const char *hdmi_pll_parents[] = { - "pxo", -}; - -static struct clk_init_data pll_init = { - .name = "hdmi_pll", - .ops = &hdmi_pll_ops, - .parent_names = hdmi_pll_parents, - .num_parents = ARRAY_SIZE(hdmi_pll_parents), -}; -#endif - -/* - * HDMI Phy: - */ - -static void hdmi_phy_8960_destroy(struct hdmi_phy *phy) -{ - struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); - kfree(phy_8960); -} - static void hdmi_phy_8960_powerup(struct hdmi_phy *phy, - unsigned long int pixclock) + unsigned long int pixclock) { - struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); - struct hdmi *hdmi = phy_8960->hdmi; - DBG("pixclock: %lu", pixclock); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG5, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG6, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG7, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG8, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG9, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG10, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG11, 0x00); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG3, 0x20); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG0, 0x1b); + 
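The powerup sequence being converted here (and continuing below) switches from hdmi_write(hdmi, ...) to hdmi_phy_write(phy, ...), i.e. the PHY code now goes through the PHY's own register accessor instead of the HDMI core's. The accessor itself is not visible in this hunk; a sketch of what it presumably looks like, modeled on the pll_write()/pll_read() helpers added later in this patch (the mmio field name is an assumption):

	#include "hdmi.h"	/* struct hdmi_phy, msm_writel()/msm_readl() */

	/* Write/read a PHY register through the PHY's own MMIO mapping. */
	static inline void hdmi_phy_write(struct hdmi_phy *phy, u32 offset, u32 data)
	{
		msm_writel(data, phy->mmio + offset);
	}

	static inline u32 hdmi_phy_read(struct hdmi_phy *phy, u32 offset)
	{
		return msm_readl(phy->mmio + offset);
	}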
hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG1, 0xf2); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG4, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG5, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG6, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG7, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG8, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG9, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG10, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG11, 0x00); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG3, 0x20); } static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy) { - struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); - struct hdmi *hdmi = phy_8960->hdmi; - DBG(""); - hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x7f); } -static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = { - .destroy = hdmi_phy_8960_destroy, - .powerup = hdmi_phy_8960_powerup, - .powerdown = hdmi_phy_8960_powerdown, +static const char * const hdmi_phy_8960_reg_names[] = { + "core-vdda", }; -struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi) -{ - struct hdmi_phy_8960 *phy_8960; - struct hdmi_phy *phy = NULL; - int ret; -#ifdef CONFIG_COMMON_CLK - int i; - - /* sanity check: */ - for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++) - if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate)) - return ERR_PTR(-EINVAL); -#endif - - phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL); - if (!phy_8960) { - ret = -ENOMEM; - goto fail; - } - - phy = &phy_8960->base; - - phy->funcs = &hdmi_phy_8960_funcs; - - phy_8960->hdmi = hdmi; - -#ifdef CONFIG_COMMON_CLK - phy_8960->pll_hw.init = &pll_init; - phy_8960->pll = devm_clk_register(&hdmi->pdev->dev, &phy_8960->pll_hw); - if (IS_ERR(phy_8960->pll)) { - ret = PTR_ERR(phy_8960->pll); - phy_8960->pll = NULL; - goto fail; - } -#endif - - return phy; +static const char * const hdmi_phy_8960_clk_names[] = { + "slave_iface_clk", +}; -fail: - if (phy) - hdmi_phy_8960_destroy(phy); - return ERR_PTR(ret); -} +const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg = { + .type = MSM_HDMI_PHY_8960, + .powerup = hdmi_phy_8960_powerup, + .powerdown = hdmi_phy_8960_powerdown, + .reg_names = hdmi_phy_8960_reg_names, + .num_regs = ARRAY_SIZE(hdmi_phy_8960_reg_names), + .clk_names = hdmi_phy_8960_clk_names, + .num_clks = ARRAY_SIZE(hdmi_phy_8960_clk_names), +}; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c new file mode 100644 index 000000000000..aa94a553794f --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c @@ -0,0 +1,766 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk-provider.h> + +#include "hdmi.h" + +#define HDMI_VCO_MAX_FREQ 12000000000UL +#define HDMI_VCO_MIN_FREQ 8000000000UL + +#define HDMI_PCLK_MAX_FREQ 600000000 +#define HDMI_PCLK_MIN_FREQ 25000000 + +#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD 3400000000UL +#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD 1500000000UL +#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD 750000000UL +#define HDMI_CORECLK_DIV 5 +#define HDMI_DEFAULT_REF_CLOCK 19200000 +#define HDMI_PLL_CMP_CNT 1024 + +#define HDMI_PLL_POLL_MAX_READS 100 +#define HDMI_PLL_POLL_TIMEOUT_US 150 + +#define HDMI_NUM_TX_CHANNEL 4 + +struct hdmi_pll_8996 { + struct platform_device *pdev; + struct clk_hw clk_hw; + + /* pll mmio base */ + void __iomem *mmio_qserdes_com; + /* tx channel base */ + void __iomem *mmio_qserdes_tx[HDMI_NUM_TX_CHANNEL]; +}; + +#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8996, clk_hw) + +struct hdmi_8996_phy_pll_reg_cfg { + u32 tx_lx_lane_mode[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_tx_band[HDMI_NUM_TX_CHANNEL]; + u32 com_svs_mode_clk_sel; + u32 com_hsclk_sel; + u32 com_pll_cctrl_mode0; + u32 com_pll_rctrl_mode0; + u32 com_cp_ctrl_mode0; + u32 com_dec_start_mode0; + u32 com_div_frac_start1_mode0; + u32 com_div_frac_start2_mode0; + u32 com_div_frac_start3_mode0; + u32 com_integloop_gain0_mode0; + u32 com_integloop_gain1_mode0; + u32 com_lock_cmp_en; + u32 com_lock_cmp1_mode0; + u32 com_lock_cmp2_mode0; + u32 com_lock_cmp3_mode0; + u32 com_core_clk_en; + u32 com_coreclk_div; + u32 com_vco_tune_ctrl; + + u32 tx_lx_tx_drv_lvl[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_tx_emp_post1_lvl[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_vmode_ctrl1[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_vmode_ctrl2[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_res_code_lane_tx[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_hp_pd_enables[HDMI_NUM_TX_CHANNEL]; + + u32 phy_mode; +}; + +struct hdmi_8996_post_divider { + u64 vco_freq; + int hsclk_divsel; + int vco_ratio; + int tx_band_sel; + int half_rate_mode; +}; + +static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8996 *pll) +{ + return platform_get_drvdata(pll->pdev); +} + +static inline void hdmi_pll_write(struct hdmi_pll_8996 *pll, int offset, + u32 data) +{ + msm_writel(data, pll->mmio_qserdes_com + offset); +} + +static inline u32 hdmi_pll_read(struct hdmi_pll_8996 *pll, int offset) +{ + return msm_readl(pll->mmio_qserdes_com + offset); +} + +static inline void hdmi_tx_chan_write(struct hdmi_pll_8996 *pll, int channel, + int offset, int data) +{ + msm_writel(data, pll->mmio_qserdes_tx[channel] + offset); +} + +static inline u32 pll_get_cpctrl(u64 frac_start, unsigned long ref_clk, + bool gen_ssc) +{ + if ((frac_start != 0) || gen_ssc) + return (11000000 / (ref_clk / 20)); + + return 0x23; +} + +static inline u32 pll_get_rctrl(u64 frac_start, bool gen_ssc) +{ + if ((frac_start != 0) || gen_ssc) + return 0x16; + + return 0x10; +} + +static inline u32 pll_get_cctrl(u64 frac_start, bool gen_ssc) +{ + if ((frac_start != 0) || gen_ssc) + return 0x28; + + return 0x1; +} + +static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk, + bool gen_ssc) +{ + int digclk_divsel = bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2; + u64 base; + + if ((frac_start != 0) || gen_ssc) + base = (64 * ref_clk) / HDMI_DEFAULT_REF_CLOCK; + else + base = (1022 * ref_clk) / 100; + + base <<= digclk_divsel; + + return (base <= 2046 ? 
base : 2046); +} + +static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk) +{ + u64 dividend = HDMI_PLL_CMP_CNT * fdata; + u32 divisor = ref_clk * 10; + u32 rem; + + rem = do_div(dividend, divisor); + if (rem > (divisor >> 1)) + dividend++; + + return dividend - 1; +} + +static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk) +{ + u64 fdata = ((u64)pll_cmp) * ref_clk * 10; + + do_div(fdata, HDMI_PLL_CMP_CNT); + + return fdata; +} + +static int pll_get_post_div(struct hdmi_8996_post_divider *pd, u64 bclk) +{ + int ratio[] = { 2, 3, 4, 5, 6, 9, 10, 12, 14, 15, 20, 21, 25, 28, 35 }; + int hs_divsel[] = { 0, 4, 8, 12, 1, 5, 2, 9, 3, 13, 10, 7, 14, 11, 15 }; + int tx_band_sel[] = { 0, 1, 2, 3 }; + u64 vco_freq[60]; + u64 vco, vco_optimal; + int half_rate_mode = 0; + int vco_optimal_index, vco_freq_index; + int i, j; + +retry: + vco_optimal = HDMI_VCO_MAX_FREQ; + vco_optimal_index = -1; + vco_freq_index = 0; + for (i = 0; i < 15; i++) { + for (j = 0; j < 4; j++) { + u32 ratio_mult = ratio[i] << tx_band_sel[j]; + + vco = bclk >> half_rate_mode; + vco *= ratio_mult; + vco_freq[vco_freq_index++] = vco; + } + } + + for (i = 0; i < 60; i++) { + u64 vco_tmp = vco_freq[i]; + + if ((vco_tmp >= HDMI_VCO_MIN_FREQ) && + (vco_tmp <= vco_optimal)) { + vco_optimal = vco_tmp; + vco_optimal_index = i; + } + } + + if (vco_optimal_index == -1) { + if (!half_rate_mode) { + half_rate_mode = 1; + goto retry; + } + } else { + pd->vco_freq = vco_optimal; + pd->tx_band_sel = tx_band_sel[vco_optimal_index % 4]; + pd->vco_ratio = ratio[vco_optimal_index / 4]; + pd->hsclk_divsel = hs_divsel[vco_optimal_index / 4]; + + return 0; + } + + return -EINVAL; +} + +static int pll_calculate(unsigned long pix_clk, unsigned long ref_clk, + struct hdmi_8996_phy_pll_reg_cfg *cfg) +{ + struct hdmi_8996_post_divider pd; + u64 bclk; + u64 tmds_clk; + u64 dec_start; + u64 frac_start; + u64 fdata; + u32 pll_divisor; + u32 rem; + u32 cpctrl; + u32 rctrl; + u32 cctrl; + u32 integloop_gain; + u32 pll_cmp; + int i, ret; + + /* bit clk = 10 * pix_clk */ + bclk = ((u64)pix_clk) * 10; + + if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) + tmds_clk = pix_clk >> 2; + else + tmds_clk = pix_clk; + + ret = pll_get_post_div(&pd, bclk); + if (ret) + return ret; + + dec_start = pd.vco_freq; + pll_divisor = 4 * ref_clk; + do_div(dec_start, pll_divisor); + + frac_start = pd.vco_freq * (1 << 20); + + rem = do_div(frac_start, pll_divisor); + frac_start -= dec_start * (1 << 20); + if (rem > (pll_divisor >> 1)) + frac_start++; + + cpctrl = pll_get_cpctrl(frac_start, ref_clk, false); + rctrl = pll_get_rctrl(frac_start, false); + cctrl = pll_get_cctrl(frac_start, false); + integloop_gain = pll_get_integloop_gain(frac_start, bclk, + ref_clk, false); + + fdata = pd.vco_freq; + do_div(fdata, pd.vco_ratio); + + pll_cmp = pll_get_pll_cmp(fdata, ref_clk); + + DBG("VCO freq: %llu", pd.vco_freq); + DBG("fdata: %llu", fdata); + DBG("pix_clk: %lu", pix_clk); + DBG("tmds clk: %llu", tmds_clk); + DBG("HSCLK_SEL: %d", pd.hsclk_divsel); + DBG("DEC_START: %llu", dec_start); + DBG("DIV_FRAC_START: %llu", frac_start); + DBG("PLL_CPCTRL: %u", cpctrl); + DBG("PLL_RCTRL: %u", rctrl); + DBG("PLL_CCTRL: %u", cctrl); + DBG("INTEGLOOP_GAIN: %u", integloop_gain); + DBG("TX_BAND: %d", pd.tx_band_sel); + DBG("PLL_CMP: %u", pll_cmp); + + /* Convert these values to register specific values */ + if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD) + cfg->com_svs_mode_clk_sel = 1; + else + cfg->com_svs_mode_clk_sel = 2; + + cfg->com_hsclk_sel = (0x20 | pd.hsclk_divsel); + 
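As a concrete check of the divider math in pll_get_post_div(), pll_get_pll_cmp() and the dec_start/frac_start split above: for the common 1080p60 case (pix_clk = 148.5 MHz against the 19.2 MHz HDMI_DEFAULT_REF_CLOCK), the bit clock is 1.485 GHz, the post-divider search settles on a VCO of 8.91 GHz with vco_ratio = 6, tx_band_sel = 0 and hsclk_divsel = 1, giving dec_start = 116, frac_start = 16384 and pll_cmp = 7919, so com_hsclk_sel below becomes 0x21 and com_svs_mode_clk_sel is 2 (the bit clock is under the digital-clock threshold). A standalone userspace sketch of that arithmetic, not part of the patch:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t ref_clk = 19200000;	/* HDMI_DEFAULT_REF_CLOCK */
		uint64_t pix_clk = 148500000;	/* 1080p60 */
		uint64_t bclk = pix_clk * 10;	/* 1.485 GHz bit clock */
		uint64_t vco = bclk * 6;	/* 8.91 GHz: ratio 6, tx band 0 */
		uint64_t pll_divisor = 4 * ref_clk;

		uint64_t dec_start = vco / pll_divisor;			/* 116 */
		/* the driver also rounds the fractional remainder; it is zero here */
		uint64_t frac_start = (vco << 20) / pll_divisor
				      - (dec_start << 20);		/* 16384 */

		uint64_t fdata = vco / 6;	/* post-divided back to 1.485 GHz */
		uint64_t dividend = 1024 * fdata;	/* HDMI_PLL_CMP_CNT * fdata */
		uint64_t divisor = ref_clk * 10;
		uint64_t pll_cmp = dividend / divisor;

		if (dividend % divisor > divisor / 2)	/* round to nearest, as the driver does */
			pll_cmp++;
		pll_cmp--;				/* 7919 */

		printf("dec_start=%llu frac_start=%llu pll_cmp=%llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)frac_start,
		       (unsigned long long)pll_cmp);
		return 0;
	}

Reading the value back, hdmi_8996_pll_recalc_rate() inverts this: pll_cmp_to_fdata(7919 + 1, 19200000) / 10 = 148500000, so the recovered pixel clock matches the programmed one.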
cfg->com_pll_cctrl_mode0 = cctrl; + cfg->com_pll_rctrl_mode0 = rctrl; + cfg->com_cp_ctrl_mode0 = cpctrl; + cfg->com_dec_start_mode0 = dec_start; + cfg->com_div_frac_start1_mode0 = (frac_start & 0xff); + cfg->com_div_frac_start2_mode0 = ((frac_start & 0xff00) >> 8); + cfg->com_div_frac_start3_mode0 = ((frac_start & 0xf0000) >> 16); + cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xff); + cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xf00) >> 8); + cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xff); + cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xff00) >> 8); + cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16); + cfg->com_lock_cmp_en = 0x0; + cfg->com_core_clk_en = 0x2c; + cfg->com_coreclk_div = HDMI_CORECLK_DIV; + cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0; + cfg->com_vco_tune_ctrl = 0x0; + + cfg->tx_lx_lane_mode[0] = + cfg->tx_lx_lane_mode[2] = 0x43; + + cfg->tx_lx_hp_pd_enables[0] = + cfg->tx_lx_hp_pd_enables[1] = + cfg->tx_lx_hp_pd_enables[2] = 0x0c; + cfg->tx_lx_hp_pd_enables[3] = 0x3; + + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) + cfg->tx_lx_tx_band[i] = pd.tx_band_sel + 4; + + if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) { + cfg->tx_lx_tx_drv_lvl[0] = + cfg->tx_lx_tx_drv_lvl[1] = + cfg->tx_lx_tx_drv_lvl[2] = 0x25; + cfg->tx_lx_tx_drv_lvl[3] = 0x22; + + cfg->tx_lx_tx_emp_post1_lvl[0] = + cfg->tx_lx_tx_emp_post1_lvl[1] = + cfg->tx_lx_tx_emp_post1_lvl[2] = 0x23; + cfg->tx_lx_tx_emp_post1_lvl[3] = 0x27; + + cfg->tx_lx_vmode_ctrl1[0] = + cfg->tx_lx_vmode_ctrl1[1] = + cfg->tx_lx_vmode_ctrl1[2] = + cfg->tx_lx_vmode_ctrl1[3] = 0x00; + + cfg->tx_lx_vmode_ctrl2[0] = + cfg->tx_lx_vmode_ctrl2[1] = + cfg->tx_lx_vmode_ctrl2[2] = 0x0D; + + cfg->tx_lx_vmode_ctrl2[3] = 0x00; + } else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) { + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + cfg->tx_lx_tx_drv_lvl[i] = 0x25; + cfg->tx_lx_tx_emp_post1_lvl[i] = 0x23; + cfg->tx_lx_vmode_ctrl1[i] = 0x00; + } + + cfg->tx_lx_vmode_ctrl2[0] = + cfg->tx_lx_vmode_ctrl2[1] = + cfg->tx_lx_vmode_ctrl2[2] = 0x0D; + cfg->tx_lx_vmode_ctrl2[3] = 0x00; + } else { + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + cfg->tx_lx_tx_drv_lvl[i] = 0x20; + cfg->tx_lx_tx_emp_post1_lvl[i] = 0x20; + cfg->tx_lx_vmode_ctrl1[i] = 0x00; + cfg->tx_lx_vmode_ctrl2[i] = 0x0E; + } + } + + DBG("com_svs_mode_clk_sel = 0x%x", cfg->com_svs_mode_clk_sel); + DBG("com_hsclk_sel = 0x%x", cfg->com_hsclk_sel); + DBG("com_lock_cmp_en = 0x%x", cfg->com_lock_cmp_en); + DBG("com_pll_cctrl_mode0 = 0x%x", cfg->com_pll_cctrl_mode0); + DBG("com_pll_rctrl_mode0 = 0x%x", cfg->com_pll_rctrl_mode0); + DBG("com_cp_ctrl_mode0 = 0x%x", cfg->com_cp_ctrl_mode0); + DBG("com_dec_start_mode0 = 0x%x", cfg->com_dec_start_mode0); + DBG("com_div_frac_start1_mode0 = 0x%x", cfg->com_div_frac_start1_mode0); + DBG("com_div_frac_start2_mode0 = 0x%x", cfg->com_div_frac_start2_mode0); + DBG("com_div_frac_start3_mode0 = 0x%x", cfg->com_div_frac_start3_mode0); + DBG("com_integloop_gain0_mode0 = 0x%x", cfg->com_integloop_gain0_mode0); + DBG("com_integloop_gain1_mode0 = 0x%x", cfg->com_integloop_gain1_mode0); + DBG("com_lock_cmp1_mode0 = 0x%x", cfg->com_lock_cmp1_mode0); + DBG("com_lock_cmp2_mode0 = 0x%x", cfg->com_lock_cmp2_mode0); + DBG("com_lock_cmp3_mode0 = 0x%x", cfg->com_lock_cmp3_mode0); + DBG("com_core_clk_en = 0x%x", cfg->com_core_clk_en); + DBG("com_coreclk_div = 0x%x", cfg->com_coreclk_div); + DBG("phy_mode = 0x%x", cfg->phy_mode); + + DBG("tx_l0_lane_mode = 0x%x", cfg->tx_lx_lane_mode[0]); + DBG("tx_l2_lane_mode = 0x%x", cfg->tx_lx_lane_mode[2]); + + for (i 
= 0; i < HDMI_NUM_TX_CHANNEL; i++) { + DBG("tx_l%d_tx_band = 0x%x", i, cfg->tx_lx_tx_band[i]); + DBG("tx_l%d_tx_drv_lvl = 0x%x", i, cfg->tx_lx_tx_drv_lvl[i]); + DBG("tx_l%d_tx_emp_post1_lvl = 0x%x", i, + cfg->tx_lx_tx_emp_post1_lvl[i]); + DBG("tx_l%d_vmode_ctrl1 = 0x%x", i, cfg->tx_lx_vmode_ctrl1[i]); + DBG("tx_l%d_vmode_ctrl2 = 0x%x", i, cfg->tx_lx_vmode_ctrl2[i]); + } + + return 0; +} + +static int hdmi_8996_pll_set_clk_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw); + struct hdmi_phy *phy = pll_get_phy(pll); + struct hdmi_8996_phy_pll_reg_cfg cfg; + int i, ret; + + memset(&cfg, 0x00, sizeof(cfg)); + + ret = pll_calculate(rate, parent_rate, &cfg); + if (ret) { + DRM_ERROR("PLL calculation failed\n"); + return ret; + } + + /* Initially shut down PHY */ + DBG("Disabling PHY"); + hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x0); + udelay(500); + + /* Power up sequence */ + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_CTRL, 0x04); + + hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x1); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL, 0x20); + hdmi_phy_write(phy, REG_HDMI_8996_PHY_TX0_TX1_LANE_CTL, 0x0F); + hdmi_phy_write(phy, REG_HDMI_8996_PHY_TX2_TX3_LANE_CTL, 0x0F); + + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_CLKBUF_ENABLE, + 0x03); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_TX_BAND, + cfg.tx_lx_tx_band[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_RESET_TSYNC_EN, + 0x03); + } + + hdmi_tx_chan_write(pll, 0, REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE, + cfg.tx_lx_lane_mode[0]); + hdmi_tx_chan_write(pll, 2, REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE, + cfg.tx_lx_lane_mode[2]); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1E); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x07); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYSCLK_EN_SEL, 0x37); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYS_CLK_CTRL, 0x02); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CLK_ENABLE1, 0x0E); + + /* Bypass VCO calibration */ + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SVS_MODE_CLK_SEL, + cfg.com_svs_mode_clk_sel); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_TRIM, 0x0F); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_IVCO, 0x0F); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_CTRL, + cfg.com_vco_tune_ctrl); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_CTRL, 0x06); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CLK_SELECT, 0x30); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_HSCLK_SEL, + cfg.com_hsclk_sel); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_EN, + cfg.com_lock_cmp_en); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE0, + cfg.com_pll_cctrl_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE0, + cfg.com_pll_rctrl_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE0, + cfg.com_cp_ctrl_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE0, + cfg.com_dec_start_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0, + cfg.com_div_frac_start1_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0, + cfg.com_div_frac_start2_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0, + cfg.com_div_frac_start3_mode0); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0, + 
cfg.com_integloop_gain0_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0, + cfg.com_integloop_gain1_mode0); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0, + cfg.com_lock_cmp1_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0, + cfg.com_lock_cmp2_mode0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0, + cfg.com_lock_cmp3_mode0); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAP, 0x00); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CORE_CLK_EN, + cfg.com_core_clk_en); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV, + cfg.com_coreclk_div); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CMN_CONFIG, 0x02); + + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_RESCODE_DIV_NUM, 0x15); + + /* TX lanes setup (TX 0/1/2/3) */ + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL, + cfg.tx_lx_tx_drv_lvl[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_TX_EMP_POST1_LVL, + cfg.tx_lx_tx_emp_post1_lvl[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL1, + cfg.tx_lx_vmode_ctrl1[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL2, + cfg.tx_lx_vmode_ctrl2[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL_OFFSET, + 0x00); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_OFFSET, + 0x00); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_TRAN_DRVR_EMP_EN, + 0x03); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_PARRATE_REC_DETECT_IDLE_EN, + 0x40); + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_HP_PD_ENABLES, + cfg.tx_lx_hp_pd_enables[i]); + } + + hdmi_phy_write(phy, REG_HDMI_8996_PHY_MODE, cfg.phy_mode); + hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x1F); + + /* + * Ensure that vco configuration gets flushed to hardware before + * enabling the PLL + */ + wmb(); + + return 0; +} + +static int hdmi_8996_phy_ready_status(struct hdmi_phy *phy) +{ + u32 nb_tries = HDMI_PLL_POLL_MAX_READS; + unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US; + u32 status; + int phy_ready = 0; + + DBG("Waiting for PHY ready"); + + while (nb_tries--) { + status = hdmi_phy_read(phy, REG_HDMI_8996_PHY_STATUS); + phy_ready = status & BIT(0); + + if (phy_ready) + break; + + udelay(timeout); + } + + DBG("PHY is %sready", phy_ready ? "" : "*not* "); + + return phy_ready; +} + +static int hdmi_8996_pll_lock_status(struct hdmi_pll_8996 *pll) +{ + u32 status; + int nb_tries = HDMI_PLL_POLL_MAX_READS; + unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US; + int pll_locked = 0; + + DBG("Waiting for PLL lock"); + + while (nb_tries--) { + status = hdmi_pll_read(pll, + REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS); + pll_locked = status & BIT(0); + + if (pll_locked) + break; + + udelay(timeout); + } + + DBG("HDMI PLL is %slocked", pll_locked ? 
"" : "*not* "); + + return pll_locked; +} + +static int hdmi_8996_pll_prepare(struct clk_hw *hw) +{ + struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw); + struct hdmi_phy *phy = pll_get_phy(pll); + int i, ret = 0; + + hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x1); + udelay(100); + + hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x19); + udelay(100); + + ret = hdmi_8996_pll_lock_status(pll); + if (!ret) + return ret; + + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) + hdmi_tx_chan_write(pll, i, + REG_HDMI_PHY_QSERDES_TX_LX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, + 0x6F); + + /* Disable SSC */ + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_PER1, 0x0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_PER2, 0x0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE1, 0x0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE2, 0x0); + hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_EN_CENTER, 0x2); + + ret = hdmi_8996_phy_ready_status(phy); + if (!ret) + return ret; + + /* Restart the retiming buffer */ + hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x18); + udelay(1); + hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x19); + + return 0; +} + +static long hdmi_8996_pll_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *parent_rate) +{ + if (rate < HDMI_PCLK_MIN_FREQ) + return HDMI_PCLK_MIN_FREQ; + else if (rate > HDMI_PCLK_MAX_FREQ) + return HDMI_PCLK_MAX_FREQ; + else + return rate; +} + +static unsigned long hdmi_8996_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw); + u64 fdata; + u32 cmp1, cmp2, cmp3, pll_cmp; + + cmp1 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0); + cmp2 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0); + cmp3 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0); + + pll_cmp = cmp1 | (cmp2 << 8) | (cmp3 << 16); + + fdata = pll_cmp_to_fdata(pll_cmp + 1, parent_rate); + + do_div(fdata, 10); + + return fdata; +} + +static void hdmi_8996_pll_unprepare(struct clk_hw *hw) +{ +} + +static int hdmi_8996_pll_is_enabled(struct clk_hw *hw) +{ + struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw); + u32 status; + int pll_locked; + + status = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS); + pll_locked = status & BIT(0); + + return pll_locked; +} + +static struct clk_ops hdmi_8996_pll_ops = { + .set_rate = hdmi_8996_pll_set_clk_rate, + .round_rate = hdmi_8996_pll_round_rate, + .recalc_rate = hdmi_8996_pll_recalc_rate, + .prepare = hdmi_8996_pll_prepare, + .unprepare = hdmi_8996_pll_unprepare, + .is_enabled = hdmi_8996_pll_is_enabled, +}; + +static const char * const hdmi_pll_parents[] = { + "xo", +}; + +static struct clk_init_data pll_init = { + .name = "hdmipll", + .ops = &hdmi_8996_pll_ops, + .parent_names = hdmi_pll_parents, + .num_parents = ARRAY_SIZE(hdmi_pll_parents), +}; + +int msm_hdmi_pll_8996_init(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hdmi_pll_8996 *pll; + struct clk *clk; + int i; + + pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL); + if (!pll) + return -ENOMEM; + + pll->pdev = pdev; + + pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL"); + if (IS_ERR(pll->mmio_qserdes_com)) { + dev_err(dev, "failed to map pll base\n"); + return -ENOMEM; + } + + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + char name[32], label[32]; + + snprintf(name, sizeof(name), "hdmi_tx_l%d", i); + snprintf(label, sizeof(label), "HDMI_TX_L%d", i); + + pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name, label); + if 
(IS_ERR(pll->mmio_qserdes_tx[i])) { + dev_err(dev, "failed to map pll base\n"); + return -ENOMEM; + } + } + pll->clk_hw.init = &pll_init; + + clk = devm_clk_register(dev, &pll->clk_hw); + if (IS_ERR(clk)) { + dev_err(dev, "failed to register pll clock\n"); + return -EINVAL; + } + + return 0; +} + +static const char * const hdmi_phy_8996_reg_names[] = { + "vddio", + "vcca", +}; + +static const char * const hdmi_phy_8996_clk_names[] = { + "mmagic_iface_clk", + "iface_clk", + "ref_clk", +}; + +const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg = { + .type = MSM_HDMI_PHY_8996, + .reg_names = hdmi_phy_8996_reg_names, + .num_regs = ARRAY_SIZE(hdmi_phy_8996_reg_names), + .clk_names = hdmi_phy_8996_clk_names, + .num_clks = ARRAY_SIZE(hdmi_phy_8996_clk_names), +}; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c index cb01421ae1e4..a68eea4153fc 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c @@ -17,166 +17,122 @@ #include "hdmi.h" -struct hdmi_phy_8x60 { - struct hdmi_phy base; - struct hdmi *hdmi; -}; -#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base) - -static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy) -{ - struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy); - kfree(phy_8x60); -} - static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy, unsigned long int pixclock) { - struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy); - struct hdmi *hdmi = phy_8x60->hdmi; - /* De-serializer delay D/C for non-lbk mode: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG0, - HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3)); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG0, + HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3)); if (pixclock == 27000000) { /* video_format == HDMI_VFRMT_720x480p60_16_9 */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1, - HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) | - HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3)); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG1, + HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) | + HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3)); } else { - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1, - HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) | - HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4)); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG1, + HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) | + HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4)); } /* No matter what, start from the power down mode: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_PD_PWRGEN | - HDMI_8x60_PHY_REG2_PD_PLL | - HDMI_8x60_PHY_REG2_PD_DRIVE_4 | - HDMI_8x60_PHY_REG2_PD_DRIVE_3 | - HDMI_8x60_PHY_REG2_PD_DRIVE_2 | - HDMI_8x60_PHY_REG2_PD_DRIVE_1 | - HDMI_8x60_PHY_REG2_PD_DESER); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_PWRGEN | + HDMI_8x60_PHY_REG2_PD_PLL | + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); /* Turn PowerGen on: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_PD_PLL | - HDMI_8x60_PHY_REG2_PD_DRIVE_4 | - HDMI_8x60_PHY_REG2_PD_DRIVE_3 | - HDMI_8x60_PHY_REG2_PD_DRIVE_2 | - HDMI_8x60_PHY_REG2_PD_DRIVE_1 | - HDMI_8x60_PHY_REG2_PD_DESER); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_PLL | + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); /* Turn PLL power on: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_PD_DRIVE_4 | - 
HDMI_8x60_PHY_REG2_PD_DRIVE_3 | - HDMI_8x60_PHY_REG2_PD_DRIVE_2 | - HDMI_8x60_PHY_REG2_PD_DRIVE_1 | - HDMI_8x60_PHY_REG2_PD_DESER); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); /* Write to HIGH after PLL power down de-assert: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, - HDMI_8x60_PHY_REG3_PLL_ENABLE); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG3, + HDMI_8x60_PHY_REG3_PLL_ENABLE); /* ASIC power on; PHY REG9 = 0 */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG9, 0); /* Enable PLL lock detect, PLL lock det will go high after lock * Enable the re-time logic */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12, - HDMI_8x60_PHY_REG12_RETIMING_EN | - HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG12, + HDMI_8x60_PHY_REG12_RETIMING_EN | + HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN); /* Drivers are on: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_PD_DESER); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_DESER); /* If the RX detector is needed: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_RCV_SENSE_EN | - HDMI_8x60_PHY_REG2_PD_DESER); - - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG4, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG5, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG6, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG7, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG8, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG10, 0); - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG11, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_RCV_SENSE_EN | + HDMI_8x60_PHY_REG2_PD_DESER); + + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG4, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG5, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG6, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG7, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG8, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG9, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG10, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG11, 0); /* If we want to use lock enable based on counting: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12, - HDMI_8x60_PHY_REG12_RETIMING_EN | - HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN | - HDMI_8x60_PHY_REG12_FORCE_LOCK); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG12, + HDMI_8x60_PHY_REG12_RETIMING_EN | + HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN | + HDMI_8x60_PHY_REG12_FORCE_LOCK); } static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy) { - struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy); - struct hdmi *hdmi = phy_8x60->hdmi; - /* Assert RESET PHY from controller */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, - HDMI_PHY_CTRL_SW_RESET); + hdmi_phy_write(phy, REG_HDMI_PHY_CTRL, + HDMI_PHY_CTRL_SW_RESET); udelay(10); /* De-assert RESET PHY from controller */ - hdmi_write(hdmi, REG_HDMI_PHY_CTRL, 0); + hdmi_phy_write(phy, REG_HDMI_PHY_CTRL, 0); /* Turn off Driver */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_PD_DRIVE_4 | - HDMI_8x60_PHY_REG2_PD_DRIVE_3 | - HDMI_8x60_PHY_REG2_PD_DRIVE_2 | - HDMI_8x60_PHY_REG2_PD_DRIVE_1 | - HDMI_8x60_PHY_REG2_PD_DESER); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); udelay(10); 
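As with the 8960 PHY earlier in this patch, the 8x60 powerup/powerdown code is rewritten to use hdmi_phy_write(), and the callbacks end up referenced from a per-SoC hdmi_phy_cfg (msm_hdmi_phy_8x60_cfg just below); the newer PHY cfgs additionally carry regulator and clock name tables. A hedged sketch of how a shared PHY driver might bring up those named resources before calling cfg->powerup(): the helper name is an assumption, the reg_names/num_regs/clk_names/num_clks fields come from the cfg structures in this patch, and the regulator/clk calls are the standard kernel APIs.

	#include <linux/clk.h>
	#include <linux/regulator/consumer.h>
	#include "hdmi.h"	/* struct hdmi_phy_cfg */

	static int hdmi_phy_resource_enable(struct device *dev,
					    const struct hdmi_phy_cfg *cfg,
					    struct regulator_bulk_data *regs,
					    struct clk **clks)
	{
		int i, ret;

		/* bulk-request the supplies named in the per-SoC cfg */
		for (i = 0; i < cfg->num_regs; i++)
			regs[i].supply = cfg->reg_names[i];

		ret = devm_regulator_bulk_get(dev, cfg->num_regs, regs);
		if (ret)
			return ret;

		ret = regulator_bulk_enable(cfg->num_regs, regs);
		if (ret)
			return ret;

		/* then the interface clocks, looked up by name */
		for (i = 0; i < cfg->num_clks; i++) {
			clks[i] = devm_clk_get(dev, cfg->clk_names[i]);
			if (IS_ERR(clks[i]))
				return PTR_ERR(clks[i]);

			ret = clk_prepare_enable(clks[i]);
			if (ret)
				return ret;
		}

		return 0;
	}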
/* Disable PLL */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, 0); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG3, 0); /* Power down PHY, but keep RX-sense: */ - hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2, - HDMI_8x60_PHY_REG2_RCV_SENSE_EN | - HDMI_8x60_PHY_REG2_PD_PWRGEN | - HDMI_8x60_PHY_REG2_PD_PLL | - HDMI_8x60_PHY_REG2_PD_DRIVE_4 | - HDMI_8x60_PHY_REG2_PD_DRIVE_3 | - HDMI_8x60_PHY_REG2_PD_DRIVE_2 | - HDMI_8x60_PHY_REG2_PD_DRIVE_1 | - HDMI_8x60_PHY_REG2_PD_DESER); + hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2, + HDMI_8x60_PHY_REG2_RCV_SENSE_EN | + HDMI_8x60_PHY_REG2_PD_PWRGEN | + HDMI_8x60_PHY_REG2_PD_PLL | + HDMI_8x60_PHY_REG2_PD_DRIVE_4 | + HDMI_8x60_PHY_REG2_PD_DRIVE_3 | + HDMI_8x60_PHY_REG2_PD_DRIVE_2 | + HDMI_8x60_PHY_REG2_PD_DRIVE_1 | + HDMI_8x60_PHY_REG2_PD_DESER); } -static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = { - .destroy = hdmi_phy_8x60_destroy, - .powerup = hdmi_phy_8x60_powerup, - .powerdown = hdmi_phy_8x60_powerdown, +const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg = { + .type = MSM_HDMI_PHY_8x60, + .powerup = hdmi_phy_8x60_powerup, + .powerdown = hdmi_phy_8x60_powerdown, }; - -struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi) -{ - struct hdmi_phy_8x60 *phy_8x60; - struct hdmi_phy *phy = NULL; - int ret; - - phy_8x60 = kzalloc(sizeof(*phy_8x60), GFP_KERNEL); - if (!phy_8x60) { - ret = -ENOMEM; - goto fail; - } - - phy = &phy_8x60->base; - - phy->funcs = &hdmi_phy_8x60_funcs; - - phy_8x60->hdmi = hdmi; - - return phy; - -fail: - if (phy) - hdmi_phy_8x60_destroy(phy); - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c index 56ab8917ee9a..c4a61e537851 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c @@ -17,84 +17,40 @@ #include "hdmi.h" -struct hdmi_phy_8x74 { - struct hdmi_phy base; - void __iomem *mmio; -}; -#define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base) - - -static void phy_write(struct hdmi_phy_8x74 *phy, u32 reg, u32 data) -{ - msm_writel(data, phy->mmio + reg); -} - -//static u32 phy_read(struct hdmi_phy_8x74 *phy, u32 reg) -//{ -// return msm_readl(phy->mmio + reg); -//} - -static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy) -{ - struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); - kfree(phy_8x74); -} - static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy, unsigned long int pixclock) { - struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); - - phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG0, 0x1b); - phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG1, 0xf2); - phy_write(phy_8x74, REG_HDMI_8x74_BIST_CFG0, 0x0); - phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN0, 0x0); - phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN1, 0x0); - phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN2, 0x0); - phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN3, 0x0); - phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL1, 0x20); + hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG0, 0x1b); + hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG1, 0xf2); + hdmi_phy_write(phy, REG_HDMI_8x74_BIST_CFG0, 0x0); + hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN0, 0x0); + hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN1, 0x0); + hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN2, 0x0); + hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN3, 0x0); + hdmi_phy_write(phy, REG_HDMI_8x74_PD_CTRL1, 0x20); } static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy) { - struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy); - phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL0, 0x7f); + hdmi_phy_write(phy, REG_HDMI_8x74_PD_CTRL0, 
0x7f); } -static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = { - .destroy = hdmi_phy_8x74_destroy, - .powerup = hdmi_phy_8x74_powerup, - .powerdown = hdmi_phy_8x74_powerdown, +static const char * const hdmi_phy_8x74_reg_names[] = { + "core-vdda", + "vddio", }; -struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi) -{ - struct hdmi_phy_8x74 *phy_8x74; - struct hdmi_phy *phy = NULL; - int ret; - - phy_8x74 = kzalloc(sizeof(*phy_8x74), GFP_KERNEL); - if (!phy_8x74) { - ret = -ENOMEM; - goto fail; - } - - phy = &phy_8x74->base; - - phy->funcs = &hdmi_phy_8x74_funcs; - - /* for 8x74, the phy mmio is mapped separately: */ - phy_8x74->mmio = msm_ioremap(hdmi->pdev, - "phy_physical", "HDMI_8x74"); - if (IS_ERR(phy_8x74->mmio)) { - ret = PTR_ERR(phy_8x74->mmio); - goto fail; - } - - return phy; +static const char * const hdmi_phy_8x74_clk_names[] = { + "iface_clk", + "alt_iface_clk" +}; -fail: - if (phy) - hdmi_phy_8x74_destroy(phy); - return ERR_PTR(ret); -} +const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg = { + .type = MSM_HDMI_PHY_8x74, + .powerup = hdmi_phy_8x74_powerup, + .powerdown = hdmi_phy_8x74_powerdown, + .reg_names = hdmi_phy_8x74_reg_names, + .num_regs = ARRAY_SIZE(hdmi_phy_8x74_reg_names), + .clk_names = hdmi_phy_8x74_clk_names, + .num_clks = ARRAY_SIZE(hdmi_phy_8x74_clk_names), +}; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c new file mode 100644 index 000000000000..92da69aa6187 --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c @@ -0,0 +1,461 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2013 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/clk-provider.h> +#include "hdmi.h" + +struct hdmi_pll_8960 { + struct platform_device *pdev; + struct clk_hw clk_hw; + void __iomem *mmio; + + unsigned long pixclk; +}; + +#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8960, clk_hw) + +/* + * HDMI PLL: + * + * To get the parent clock setup properly, we need to plug in hdmi pll + * configuration into common-clock-framework. 
+ */ + +struct pll_rate { + unsigned long rate; + int num_reg; + struct { + u32 val; + u32 reg; + } conf[32]; +}; + +/* NOTE: keep sorted highest freq to lowest: */ +static const struct pll_rate freqtbl[] = { + { 154000000, 14, { + { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + } + }, + /* 1080p60/1080p50 case */ + { 148500000, 27, { + { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, + { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, + { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, + { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, + { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, + { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, + { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, + } + }, + { 108000000, 13, { + { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + } + }, + /* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */ + { 74250000, 8, { + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + } + }, + { 74176000, 14, { + { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, 
REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + } + }, + { 65000000, 14, { + { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + } + }, + /* 480p60/480i60 */ + { 27030000, 18, { + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, + { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, + } + }, + /* 576p50/576i50 */ + { 27000000, 27, { + { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, + { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, + { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, + { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, + { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, + { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, + { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, + } + }, + /* 640x480p60 */ + { 25200000, 27, { + { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, + { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, + { 0x06, 
REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, + { 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, + { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, + { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, + { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, + { 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, + } + }, +}; + +static inline void pll_write(struct hdmi_pll_8960 *pll, u32 reg, u32 data) +{ + msm_writel(data, pll->mmio + reg); +} + +static inline u32 pll_read(struct hdmi_pll_8960 *pll, u32 reg) +{ + return msm_readl(pll->mmio + reg); +} + +static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8960 *pll) +{ + return platform_get_drvdata(pll->pdev); +} + +static int hdmi_pll_enable(struct clk_hw *hw) +{ + struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw); + struct hdmi_phy *phy = pll_get_phy(pll); + int timeout_count, pll_lock_retry = 10; + unsigned int val; + + DBG(""); + + /* Assert PLL S/W reset */ + pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d); + pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10); + pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a); + + /* Wait for a short time before de-asserting + * to allow the hardware to complete its job. + * This much of delay should be fine for hardware + * to assert and de-assert. + */ + udelay(10); + + /* De-assert PLL S/W reset */ + pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d); + + val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12); + val |= HDMI_8960_PHY_REG12_SW_RESET; + /* Assert PHY S/W reset */ + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val); + val &= ~HDMI_8960_PHY_REG12_SW_RESET; + /* + * Wait for a short time before de-asserting to allow the hardware to + * complete its job. This much of delay should be fine for hardware to + * assert and de-assert. + */ + udelay(10); + /* De-assert PHY S/W reset */ + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x3f); + + val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12); + val |= HDMI_8960_PHY_REG12_PWRDN_B; + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val); + /* Wait 10 us for enabling global power for PHY */ + mb(); + udelay(10); + + val = pll_read(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B); + val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B; + val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL; + pll_write(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B, val); + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x80); + + timeout_count = 1000; + while (--pll_lock_retry > 0) { + /* are we there yet? */ + val = pll_read(pll, REG_HDMI_8960_PHY_PLL_STATUS0); + if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK) + break; + + udelay(1); + + if (--timeout_count > 0) + continue; + + /* + * PLL has still not locked. 
+ * Do a software reset and try again + * Assert PLL S/W reset first + */ + pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d); + udelay(10); + pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d); + + /* + * Wait for a short duration for the PLL calibration + * before checking if the PLL gets locked + */ + udelay(350); + + timeout_count = 1000; + } + + return 0; +} + +static void hdmi_pll_disable(struct clk_hw *hw) +{ + struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw); + struct hdmi_phy *phy = pll_get_phy(pll); + unsigned int val; + + DBG(""); + + val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12); + val &= ~HDMI_8960_PHY_REG12_PWRDN_B; + hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val); + + val = pll_read(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B); + val |= HDMI_8960_PHY_REG12_SW_RESET; + val &= ~HDMI_8960_PHY_REG12_PWRDN_B; + pll_write(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B, val); + /* Make sure HDMI PHY/PLL are powered down */ + mb(); +} + +static const struct pll_rate *find_rate(unsigned long rate) +{ + int i; + + for (i = 1; i < ARRAY_SIZE(freqtbl); i++) + if (rate > freqtbl[i].rate) + return &freqtbl[i - 1]; + + return &freqtbl[i - 1]; +} + +static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw); + + return pll->pixclk; +} + +static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + const struct pll_rate *pll_rate = find_rate(rate); + + return pll_rate->rate; +} + +static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw); + const struct pll_rate *pll_rate = find_rate(rate); + int i; + + DBG("rate=%lu", rate); + + for (i = 0; i < pll_rate->num_reg; i++) + pll_write(pll, pll_rate->conf[i].reg, pll_rate->conf[i].val); + + pll->pixclk = rate; + + return 0; +} + +static const struct clk_ops hdmi_pll_ops = { + .enable = hdmi_pll_enable, + .disable = hdmi_pll_disable, + .recalc_rate = hdmi_pll_recalc_rate, + .round_rate = hdmi_pll_round_rate, + .set_rate = hdmi_pll_set_rate, +}; + +static const char * const hdmi_pll_parents[] = { + "pxo", +}; + +static struct clk_init_data pll_init = { + .name = "hdmi_pll", + .ops = &hdmi_pll_ops, + .parent_names = hdmi_pll_parents, + .num_parents = ARRAY_SIZE(hdmi_pll_parents), +}; + +int msm_hdmi_pll_8960_init(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hdmi_pll_8960 *pll; + struct clk *clk; + int i; + + /* sanity check: */ + for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++) + if (WARN_ON(freqtbl[i].rate < freqtbl[i + 1].rate)) + return -EINVAL; + + pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL); + if (!pll) + return -ENOMEM; + + pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL"); + if (IS_ERR(pll->mmio)) { + dev_err(dev, "failed to map pll base\n"); + return -ENOMEM; + } + + pll->pdev = pdev; + pll->clk_hw.init = &pll_init; + + clk = devm_clk_register(dev, &pll->clk_hw); + if (IS_ERR(clk)) { + dev_err(dev, "failed to register pll clock\n"); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h index dbd9cc4daf2e..6eab7d0cf6b5 100644 --- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h +++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h @@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 
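The PLL code above follows the common clock framework's provider pattern: a driver-private struct embeds a clk_hw, a clk_ops table programs the per-rate register values, and devm_clk_register() exposes the result as "hdmi_pll". Below is a rough, trimmed sketch of that shape; it is illustrative only, not part of the patch, and my_pll, my_pll_register and the omitted ops are invented names.

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

struct my_pll {
	void __iomem *mmio;		/* PLL register window */
	unsigned long pixclk;		/* rate cached by .set_rate */
	struct clk_hw clk_hw;
};

#define hw_to_my_pll(x) container_of(x, struct my_pll, clk_hw)

static unsigned long my_pll_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	/* Table-driven PLL: report whatever rate .set_rate last programmed. */
	return hw_to_my_pll(hw)->pixclk;
}

static const struct clk_ops my_pll_ops = {
	/* .enable/.disable/.round_rate/.set_rate would walk the per-rate
	 * register table, as the hunk above does. */
	.recalc_rate = my_pll_recalc_rate,
};

static const char * const my_pll_parents[] = { "pxo" };

static const struct clk_init_data my_pll_init = {
	.name		= "my_hdmi_pll",
	.ops		= &my_pll_ops,
	.parent_names	= my_pll_parents,
	.num_parents	= ARRAY_SIZE(my_pll_parents),
};

static int my_pll_register(struct platform_device *pdev, struct my_pll *pll)
{
	struct clk *clk;

	pll->clk_hw.init = &my_pll_init;
	clk = devm_clk_register(&pdev->dev, &pll->clk_hw);

	return IS_ERR(clk) ? PTR_ERR(clk) : 0;
}

Registering through devm_clk_register() ties the clock's lifetime to the platform device, which is why the init path above needs no matching unregister.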
2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) @@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h index d5d94575fa1b..6688e79cc88e 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h @@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) @@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c 
b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 28df397c3b04..c77e3d4e2c5c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -361,13 +361,6 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc, request_pending(crtc, PENDING_FLIP); } -static int mdp4_crtc_set_property(struct drm_crtc *crtc, - struct drm_property *property, uint64_t val) -{ - // XXX - return -EINVAL; -} - #define CURSOR_WIDTH 64 #define CURSOR_HEIGHT 64 @@ -499,7 +492,7 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = mdp4_crtc_destroy, .page_flip = drm_atomic_helper_page_flip, - .set_property = mdp4_crtc_set_property, + .set_property = drm_atomic_helper_crtc_set_property, .cursor_set = mdp4_crtc_cursor_set, .cursor_move = mdp4_crtc_cursor_move, .reset = drm_atomic_helper_crtc_reset, @@ -575,13 +568,6 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc) return mdp4_crtc->vblank.irqmask; } -void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - DBG("%s: cancel: %p", mdp4_crtc->name, file); - complete_flip(crtc, file); -} - /* set dma config, ie. the format the encoder wants. */ void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config) { diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c index 2f57e9453b67..106f0e772595 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c @@ -47,13 +47,6 @@ static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = { .destroy = mdp4_dsi_encoder_destroy, }; -static bool mdp4_dsi_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -163,7 +156,6 @@ static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder) } static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = { - .mode_fixup = mdp4_dsi_encoder_mode_fixup, .mode_set = mdp4_dsi_encoder_mode_set, .disable = mdp4_dsi_encoder_disable, .enable = mdp4_dsi_encoder_enable, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c index a21df54cb50f..35ad78a1dc1c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c @@ -94,13 +94,6 @@ static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = { .destroy = mdp4_dtv_encoder_destroy, }; -static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -234,7 +227,6 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder) } static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = { - .mode_fixup = mdp4_dtv_encoder_mode_fixup, .mode_set = mdp4_dtv_encoder_mode_set, .enable = mdp4_dtv_encoder_enable, .disable = mdp4_dtv_encoder_disable, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 5a8e3d6bcbff..76e1dfb5d25e 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ 
b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -179,19 +179,20 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, } } -static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) -{ - struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - struct msm_drm_private *priv = mdp4_kms->dev->dev_private; - unsigned i; - - for (i = 0; i < priv->num_crtcs; i++) - mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file); -} +static const char * const iommu_ports[] = { + "mdp_port0_cb0", "mdp_port1_cb0", +}; static void mdp4_destroy(struct msm_kms *kms) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + struct msm_mmu *mmu = mdp4_kms->mmu; + + if (mmu) { + mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); + mmu->funcs->destroy(mmu); + } + if (mdp4_kms->blank_cursor_iova) msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id); if (mdp4_kms->blank_cursor_bo) @@ -213,7 +214,6 @@ static const struct mdp_kms_funcs kms_funcs = { .wait_for_crtc_commit_done = mdp4_wait_for_crtc_commit_done, .get_format = mdp_get_format, .round_pixclk = mdp4_round_pixclk, - .preclose = mdp4_preclose, .destroy = mdp4_destroy, }, .set_irqmask = mdp4_set_irqmask, @@ -326,7 +326,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, if (priv->hdmi) { /* Construct bridge/connector for HDMI: */ - ret = hdmi_modeset_init(priv->hdmi, dev, encoder); + ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder); if (ret) { dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret); return ret; @@ -457,10 +457,6 @@ fail: return ret; } -static const char *iommu_ports[] = { - "mdp_port0_cb0", "mdp_port1_cb0", -}; - struct msm_kms *mdp4_kms_init(struct drm_device *dev) { struct platform_device *pdev = dev->platformdev; @@ -565,6 +561,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) ARRAY_SIZE(iommu_ports)); if (ret) goto fail; + + mdp4_kms->mmu = mmu; } else { dev_info(dev->dev, "no iommu, fallback to phys " "contig buffers for scanout\n"); diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index d2c96ef431f4..b2828717be2a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -45,6 +45,7 @@ struct mdp4_kms { struct clk *pclk; struct clk *lut_clk; struct clk *axi_clk; + struct msm_mmu *mmu; struct mdp_irq error_handler; @@ -199,7 +200,6 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev, enum mdp4_pipe pipe_id, bool private_plane); uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc); -void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config); void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer); void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c index cd63fedb67cc..bc3d8e719c6c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c @@ -260,13 +260,6 @@ static void setup_phy(struct drm_encoder *encoder) mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0); } -static bool mdp4_lcdc_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ 
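The mdp4_kms hunks above keep a pointer to the attached msm_mmu so that mdp4_destroy() can detach from the same port list and free it; previously the IOMMU attach had no matching teardown. The pairing reduces to roughly the sketch below, where my_kms and the helper names are invented and only the msm_mmu ops come from the hunks.

#include "msm_mmu.h"	/* struct msm_mmu, ->funcs->attach/detach/destroy */

struct my_kms {
	struct msm_mmu *mmu;	/* NULL when running without an IOMMU */
};

static const char * const iommu_ports[] = {
	"mdp_port0_cb0", "mdp_port1_cb0",
};

static int my_kms_attach_iommu(struct my_kms *kms, struct msm_mmu *mmu)
{
	int ret;

	ret = mmu->funcs->attach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
	if (ret)
		return ret;

	kms->mmu = mmu;		/* remembered so destroy() can undo the attach */
	return 0;
}

static void my_kms_destroy(struct my_kms *kms)
{
	struct msm_mmu *mmu = kms->mmu;

	if (mmu) {
		mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
		mmu->funcs->destroy(mmu);
	}
}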
-430,7 +423,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) } static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = { - .mode_fixup = mdp4_lcdc_encoder_mode_fixup, .mode_set = mdp4_lcdc_encoder_mode_set, .disable = mdp4_lcdc_encoder_disable, .enable = mdp4_lcdc_encoder_enable, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index c37da9c61e29..b275ce11b24b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h @@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) @@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c index 1aa21dba663d..69094cb28103 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c @@ -188,13 +188,6 @@ static const struct drm_encoder_funcs mdp5_cmd_encoder_funcs = { .destroy = mdp5_cmd_encoder_destroy, }; -static bool mdp5_cmd_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -256,7 +249,6 @@ static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) } static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = { - .mode_fixup = mdp5_cmd_encoder_mode_fixup, .mode_set = mdp5_cmd_encoder_mode_set, .disable = mdp5_cmd_encoder_disable, .enable = mdp5_cmd_encoder_enable, @@ -340,4 +332,3 @@ fail: return ERR_PTR(ret); } - diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 20cee5ce4071..a064d9712234 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ 
b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -468,13 +468,6 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, request_pending(crtc, PENDING_FLIP); } -static int mdp5_crtc_set_property(struct drm_crtc *crtc, - struct drm_property *property, uint64_t val) -{ - // XXX - return -EINVAL; -} - static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); @@ -625,7 +618,7 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = mdp5_crtc_destroy, .page_flip = drm_atomic_helper_page_flip, - .set_property = mdp5_crtc_set_property, + .set_property = drm_atomic_helper_crtc_set_property, .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, @@ -721,12 +714,6 @@ uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc) return mdp5_crtc->vblank.irqmask; } -void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file) -{ - DBG("cancel: %p", file); - complete_flip(crtc, file); -} - void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, struct mdp5_interface *intf, struct mdp5_ctl *ctl) { diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index 0d737cad03a6..1d95f9fd9dc7 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -112,13 +112,6 @@ static const struct drm_encoder_funcs mdp5_encoder_funcs = { .destroy = mdp5_encoder_destroy, }; -static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void mdp5_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -287,7 +280,6 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) } static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { - .mode_fixup = mdp5_encoder_mode_fixup, .mode_set = mdp5_encoder_mode_set, .disable = mdp5_encoder_disable, .enable = mdp5_encoder_enable, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index e115318402bd..484b4d15e71d 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -117,16 +117,6 @@ static int mdp5_set_split_display(struct msm_kms *kms, return mdp5_encoder_set_split_display(encoder, slave_encoder); } -static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file) -{ - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct msm_drm_private *priv = mdp5_kms->dev->dev_private; - unsigned i; - - for (i = 0; i < priv->num_crtcs; i++) - mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file); -} - static void mdp5_destroy(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); @@ -164,7 +154,6 @@ static const struct mdp_kms_funcs kms_funcs = { .get_format = mdp_get_format, .round_pixclk = mdp5_round_pixclk, .set_split_display = mdp5_set_split_display, - .preclose = mdp5_preclose, .destroy = mdp5_destroy, }, .set_irqmask = mdp5_set_irqmask, @@ -295,7 +284,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) break; } - ret = hdmi_modeset_init(priv->hdmi, dev, encoder); + ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder); break; case INTF_DSI: { diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h 
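Several hunks above delete encoder .mode_fixup callbacks that only returned true and point the CRTC .set_property hook at drm_atomic_helper_crtc_set_property() instead of a stub returning -EINVAL. The resulting function tables reduce to something like the sketch below (my_crtc_destroy is a placeholder; everything else is stock DRM); the helper core treats a missing .mode_fixup as "use the mode unchanged", so the boilerplate can simply be dropped.

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>

static void my_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}

static const struct drm_crtc_funcs my_crtc_funcs = {
	.set_config	= drm_atomic_helper_set_config,
	.destroy	= my_crtc_destroy,
	.page_flip	= drm_atomic_helper_page_flip,
	.set_property	= drm_atomic_helper_crtc_set_property,	/* was "return -EINVAL" */
	.reset		= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};

/* Encoder helper tables leave .mode_fixup out entirely; a NULL hook means
 * the requested mode is passed through unmodified. */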
b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 00730ba08a60..9a25898239d3 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -211,7 +211,6 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); int mdp5_crtc_get_lm(struct drm_crtc *crtc); -void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, struct mdp5_interface *intf, struct mdp5_ctl *ctl); void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h index 0aec1ac1f6d0..452e3518f98b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h @@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) @@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 9a30807b900b..d52910e2c26c 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -61,7 +61,7 @@ module_param(fbdev, bool, 0600); #endif static char *vram = "16m"; -MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU"); +MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)"); module_param(vram, charp, 0); /* @@ -196,6 +196,11 @@ static int msm_unload(struct drm_device *dev) } drm_kms_helper_poll_fini(dev); + +#ifdef CONFIG_DRM_FBDEV_EMULATION + if (fbdev && priv->fbdev) + msm_fbdev_free(dev); +#endif drm_mode_config_cleanup(dev); drm_vblank_cleanup(dev); @@ -1116,7 +1121,7 @@ static int __init msm_drm_register(void) DBG("init"); msm_dsi_register(); msm_edp_register(); - hdmi_register(); + msm_hdmi_register(); adreno_register(); return platform_driver_register(&msm_platform_driver); } @@ -1125,7 +1130,7 @@ static void __exit 
msm_drm_unregister(void) { DBG("fini"); platform_driver_unregister(&msm_platform_driver); - hdmi_unregister(); + msm_hdmi_unregister(); adreno_unregister(); msm_edp_unregister(); msm_dsi_unregister(); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index c1e7bba2fdb7..870dbe58c259 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -240,12 +240,13 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); +void msm_fbdev_free(struct drm_device *dev); struct hdmi; -int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev, +int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev, struct drm_encoder *encoder); -void __init hdmi_register(void); -void __exit hdmi_unregister(void); +void __init msm_hdmi_register(void); +void __exit msm_hdmi_unregister(void); struct msm_edp; void __init msm_edp_register(void); diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index d95af6eba602..d9759bf3482e 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -62,12 +62,8 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par; struct msm_fbdev *fbdev = to_msm_fbdev(helper); struct drm_gem_object *drm_obj = fbdev->bo; - struct drm_device *dev = helper->dev; int ret = 0; - if (drm_device_is_unplugged(dev)) - return -ENODEV; - ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma); if (ret) { pr_err("%s:drm_gem_mmap_obj fail\n", __func__); diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 6d7cd3fe21e7..43d2181231c0 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -323,28 +323,27 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_msm_gem_submit *args = data; struct msm_file_private *ctx = file->driver_priv; struct msm_gem_submit *submit; - struct msm_gpu *gpu; + struct msm_gpu *gpu = priv->gpu; unsigned i; int ret; + if (!gpu) + return -ENXIO; + /* for now, we just have 3d pipe.. 
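The msm_ioctl_gem_submit() changes that begin above and continue below reorder the function so the cheap rejections (no GPU, wrong pipe, too many commands) and the submit allocation happen before dev->struct_mutex is taken, and the lock is held only around the work that actually needs it. The skeleton below shows that shape under invented names; every my_* identifier is a stand-in, not the driver's real helper.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_job { int dummy; };

struct my_device {
	struct mutex lock;
	bool have_engine;
};

static struct my_job *my_job_create(void) { return kzalloc(sizeof(struct my_job), GFP_KERNEL); }
static int my_job_run(struct my_device *dev, struct my_job *job) { return 0; }
static void my_job_cleanup(struct my_job *job) { kfree(job); }

static int my_submit_ioctl(struct my_device *dev, unsigned int nr_cmds)
{
	struct my_job *job;
	int ret;

	if (!dev->have_engine)		/* cheap rejections first, unlocked */
		return -ENXIO;
	if (nr_cmds > 4)
		return -EINVAL;

	job = my_job_create();		/* allocate before taking the lock */
	if (!job)
		return -ENOMEM;

	mutex_lock(&dev->lock);		/* hold the lock only for the real work */
	ret = my_job_run(dev, job);
	my_job_cleanup(job);		/* single cleanup path, success or error */
	mutex_unlock(&dev->lock);

	return ret;
}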
eventually this would need to * be more clever to dispatch to appropriate gpu module: */ if (args->pipe != MSM_PIPE_3D0) return -EINVAL; - gpu = priv->gpu; - if (args->nr_cmds > MAX_CMDS) return -EINVAL; - mutex_lock(&dev->struct_mutex); - submit = submit_create(dev, gpu, args->nr_bos); - if (!submit) { - ret = -ENOMEM; - goto out; - } + if (!submit) + return -ENOMEM; + + mutex_lock(&dev->struct_mutex); ret = submit_lookup_objects(submit, args, file); if (ret) @@ -419,8 +418,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, args->fence = submit->fence; out: - if (submit) - submit_cleanup(submit, !!ret); + submit_cleanup(submit, !!ret); mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 7ac2f1997e4a..a7a0b6d9b057 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -31,13 +31,15 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev, return 0; } -static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt) +static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names, + int cnt) { struct msm_iommu *iommu = to_msm_iommu(mmu); return iommu_attach_device(iommu->domain, mmu->dev); } -static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt) +static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names, + int cnt) { struct msm_iommu *iommu = to_msm_iommu(mmu); iommu_detach_device(iommu->domain, mmu->dev); diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 7cd88d9dc155..b8ca9a0e9170 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -21,8 +21,8 @@ #include <linux/iommu.h> struct msm_mmu_funcs { - int (*attach)(struct msm_mmu *mmu, const char **names, int cnt); - void (*detach)(struct msm_mmu *mmu, const char **names, int cnt); + int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt); + void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt); int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, unsigned len, int prot); int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index d5e6938cc6bc..cdf522770cfa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -314,7 +314,7 @@ void nouveau_register_dsm_handler(void) if (!r) return; - vga_switcheroo_register_handler(&nouveau_dsm_handler); + vga_switcheroo_register_handler(&nouveau_dsm_handler, 0); } /* Must be called for Optimus models before the card can be turned off */ diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index fcebfae5d426..ae96ebc490fb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -27,6 +27,7 @@ #include <acpi/button.h> #include <linux/pm_runtime.h> +#include <linux/vga_switcheroo.h> #include <drm/drmP.h> #include <drm/drm_edid.h> @@ -153,6 +154,17 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) if (ret == 0) break; } else + if ((vga_switcheroo_handler_flags() & + VGA_SWITCHEROO_CAN_SWITCH_DDC) && + nv_encoder->dcb->type == DCB_OUTPUT_LVDS && + nv_encoder->i2c) { + int ret; + vga_switcheroo_lock_ddc(dev->pdev); + ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50); + vga_switcheroo_unlock_ddc(dev->pdev); + if (ret) + break; + } else 
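The msm_iommu/msm_mmu hunks above widen the attach/detach prototypes from "const char **names" to "const char * const *names", so a fully-const port table (like the static const char * const iommu_ports[] added earlier) can be passed without a qualifier warning. A tiny standalone C illustration of the difference, with made-up names:

#include <stdio.h>

static const char * const ports[] = { "mdp_port0_cb0", "mdp_port1_cb0" };

/* Both the strings and the array of pointers are read-only here. */
static void list_ports(const char * const *names, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++)
		printf("%s\n", names[i]);
}

int main(void)
{
	/* With a plain "const char **" parameter this call would warn about
	 * discarding the array's const qualifier; this signature accepts it. */
	list_ports(ports, 2);
	return 0;
}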
if (nv_encoder->i2c) { if (nvkm_probe_i2c(nv_encoder->i2c, 0x50)) break; @@ -265,7 +277,14 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) nv_encoder = nouveau_connector_ddc_detect(connector); if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { - nv_connector->edid = drm_get_edid(connector, i2c); + if ((vga_switcheroo_handler_flags() & + VGA_SWITCHEROO_CAN_SWITCH_DDC) && + nv_connector->type == DCB_CONNECTOR_LVDS) + nv_connector->edid = drm_get_edid_switcheroo(connector, + i2c); + else + nv_connector->edid = drm_get_edid(connector, i2c); + drm_mode_connector_update_edid_property(connector, nv_connector->edid); if (!nv_connector->edid) { diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 2f2f252e3fb6..bb8498c9b13e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -22,11 +22,13 @@ * Authors: Ben Skeggs */ +#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pm_runtime.h> +#include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include "drmP.h" @@ -312,6 +314,15 @@ static int nouveau_drm_probe(struct pci_dev *pdev, bool boot = false; int ret; + /* + * apple-gmux is needed on dual GPU MacBook Pro + * to probe the panel if we're the inactive GPU. + */ + if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) && + apple_gmux_present() && pdev != vga_default_device() && + !vga_switcheroo_handler_flags()) + return -EPROBE_DEFER; + /* remove conflicting drivers (vesafb, efifb etc) */ aper = alloc_apertures(3); if (!aper) diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 263f4b7a5789..04097dab8589 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -271,18 +271,7 @@ static void omap_crtc_complete_page_flip(struct drm_crtc *crtc) return; spin_lock_irqsave(&dev->event_lock, flags); - - list_del(&event->base.link); - - /* - * Queue the event for delivery if it's still linked to a file - * handle, otherwise just destroy it. - */ - if (event->base.file_priv) - drm_crtc_send_vblank_event(crtc, event); - else - event->base.destroy(&event->base); - + drm_crtc_send_vblank_event(crtc, event); spin_unlock_irqrestore(&dev->event_lock, flags); } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index c69a519f5d87..80398a684cae 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -142,7 +142,6 @@ static int omap_atomic_commit(struct drm_device *dev, { struct omap_drm_private *priv = dev->dev_private; struct omap_atomic_state_commit *commit; - unsigned long flags; unsigned int i; int ret; @@ -175,17 +174,6 @@ static int omap_atomic_commit(struct drm_device *dev, priv->commit.pending |= commit->crtcs; spin_unlock(&priv->commit.lock); - /* Keep track of all CRTC events to unlink them in preclose(). */ - spin_lock_irqsave(&dev->event_lock, flags); - for (i = 0; i < dev->mode_config.num_crtc; ++i) { - struct drm_crtc_state *cstate = state->crtc_states[i]; - - if (cstate && cstate->event) - list_add_tail(&cstate->event->base.link, - &priv->commit.events); - } - spin_unlock_irqrestore(&dev->event_lock, flags); - /* Swap the state, this is the point of no return. 
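The nouveau_drm.c hunk above (and the matching radeon hunk later in this patch) refuses to bind on dual-GPU MacBook Pros until a vga_switcheroo handler with DDC-switching support has registered, since only then can the inactive GPU reach the panel's EDID; the connector code pairs this with drm_get_edid_switcheroo() and vga_switcheroo_lock_ddc()/unlock_ddc(). The probe-time guard, lifted out on its own under an invented function name, looks like:

#include <linux/apple-gmux.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>

/* Return -EPROBE_DEFER while apple-gmux is present but has not yet
 * registered a switcheroo handler, so the inactive GPU retries later. */
static int my_gpu_check_gmux(struct pci_dev *pdev)
{
	if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
	    apple_gmux_present() && pdev != vga_default_device() &&
	    !vga_switcheroo_handler_flags())
		return -EPROBE_DEFER;

	return 0;
}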
*/ drm_atomic_helper_swap_state(dev, state); @@ -675,7 +663,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags) priv->wq = alloc_ordered_workqueue("omapdrm", 0); init_waitqueue_head(&priv->commit.wait); spin_lock_init(&priv->commit.lock); - INIT_LIST_HEAD(&priv->commit.events); spin_lock_init(&priv->list_lock); INIT_LIST_HEAD(&priv->obj_list); @@ -789,33 +776,6 @@ static void dev_lastclose(struct drm_device *dev) } } -static void dev_preclose(struct drm_device *dev, struct drm_file *file) -{ - struct omap_drm_private *priv = dev->dev_private; - struct drm_pending_event *event; - unsigned long flags; - - DBG("preclose: dev=%p", dev); - - /* - * Unlink all pending CRTC events to make sure they won't be queued up - * by a pending asynchronous commit. - */ - spin_lock_irqsave(&dev->event_lock, flags); - list_for_each_entry(event, &priv->commit.events, link) { - if (event->file_priv == file) { - file->event_space += event->event->length; - event->file_priv = NULL; - } - } - spin_unlock_irqrestore(&dev->event_lock, flags); -} - -static void dev_postclose(struct drm_device *dev, struct drm_file *file) -{ - DBG("postclose: dev=%p, file=%p", dev, file); -} - static const struct vm_operations_struct omap_gem_vm_ops = { .fault = omap_gem_fault, .open = drm_gem_vm_open, @@ -840,8 +800,6 @@ static struct drm_driver omap_drm_driver = { .unload = dev_unload, .open = dev_open, .lastclose = dev_lastclose, - .preclose = dev_preclose, - .postclose = dev_postclose, .set_busid = drm_platform_set_busid, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = omap_irq_enable_vblank, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index defe74d28b04..0fbe17d0ec6f 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -108,7 +108,6 @@ struct omap_drm_private { /* atomic commit */ struct { - struct list_head events; wait_queue_head_t wait; u32 pending; spinlock_t lock; /* Protects commit.pending */ diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index 19e426b698d3..3cf8aab23a39 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -83,7 +83,7 @@ static void omap_gem_dmabuf_release(struct dma_buf *buffer) static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, - size_t start, size_t len, enum dma_data_direction dir) + enum dma_data_direction dir) { struct drm_gem_object *obj = buffer->priv; struct page **pages; @@ -98,7 +98,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, } static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer, - size_t start, size_t len, enum dma_data_direction dir) + enum dma_data_direction dir) { struct drm_gem_object *obj = buffer->priv; omap_gem_put_pages(obj); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index f88a631c43ab..2164c999052c 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1016,6 +1016,7 @@ static const struct drm_display_mode nec_nl4827hc19_05b_mode = { .vsync_end = 272 + 2 + 4, .vtotal = 272 + 2 + 4 + 2, .vrefresh = 74, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; static const struct panel_desc nec_nl4827hc19_05b = { diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 86276519b2ef..43e5f503d1c5 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -734,14 
+734,6 @@ static void qxl_enc_dpms(struct drm_encoder *encoder, int mode) DRM_DEBUG("\n"); } -static bool qxl_enc_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - DRM_DEBUG("\n"); - return true; -} - static void qxl_enc_prepare(struct drm_encoder *encoder) { DRM_DEBUG("\n"); @@ -864,7 +856,6 @@ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector) static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = { .dpms = qxl_enc_dpms, - .mode_fixup = qxl_enc_mode_fixup, .prepare = qxl_enc_prepare, .mode_set = qxl_enc_mode_set, .commit = qxl_enc_commit, diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 01b20e14a247..1603751b1164 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -2623,16 +2623,8 @@ radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode) } -static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = { .dpms = radeon_atom_ext_dpms, - .mode_fixup = radeon_atom_ext_mode_fixup, .prepare = radeon_atom_ext_prepare, .mode_set = radeon_atom_ext_mode_set, .commit = radeon_atom_ext_commit, diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 4c30d8c65558..06001400ce8b 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -4219,13 +4219,20 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); return r; } - r = radeon_fence_wait(ib.fence, false); - if (r) { + r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies( + RADEON_USEC_IB_TEST_TIMEOUT)); + if (r < 0) { DRM_ERROR("radeon: fence wait failed (%d).\n", r); radeon_scratch_free(rdev, scratch); radeon_ib_free(rdev, &ib); return r; + } else if (r == 0) { + DRM_ERROR("radeon: fence wait timed out.\n"); + radeon_scratch_free(rdev, scratch); + radeon_ib_free(rdev, &ib); + return -ETIMEDOUT; } + r = 0; for (i = 0; i < rdev->usec_timeout; i++) { tmp = RREG32(scratch); if (tmp == 0xDEADBEEF) diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index d16f2eebd95e..9c351dc8a9e0 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -737,11 +737,16 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); return r; } - r = radeon_fence_wait(ib.fence, false); - if (r) { + r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies( + RADEON_USEC_IB_TEST_TIMEOUT)); + if (r < 0) { DRM_ERROR("radeon: fence wait failed (%d).\n", r); return r; + } else if (r == 0) { + DRM_ERROR("radeon: fence wait timed out.\n"); + return -ETIMEDOUT; } + r = 0; for (i = 0; i < rdev->usec_timeout; i++) { tmp = le32_to_cpu(rdev->wb.wb[index/4]); if (tmp == 0xDEADBEEF) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 5eae0a88dd3e..6e478a248628 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -3732,11 +3732,17 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); goto free_ib; } - r = radeon_fence_wait(ib.fence, false); - if (r) { + r = 
radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies( + RADEON_USEC_IB_TEST_TIMEOUT)); + if (r < 0) { DRM_ERROR("radeon: fence wait failed (%d).\n", r); goto free_ib; + } else if (r == 0) { + DRM_ERROR("radeon: fence wait timed out.\n"); + r = -ETIMEDOUT; + goto free_ib; } + r = 0; for (i = 0; i < rdev->usec_timeout; i++) { tmp = RREG32(scratch); if (tmp == 0xDEADBEEF) { diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index cc2fdf0be37a..ed121042247f 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3381,11 +3381,17 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); goto free_ib; } - r = radeon_fence_wait(ib.fence, false); - if (r) { + r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies( + RADEON_USEC_IB_TEST_TIMEOUT)); + if (r < 0) { DRM_ERROR("radeon: fence wait failed (%d).\n", r); goto free_ib; + } else if (r == 0) { + DRM_ERROR("radeon: fence wait timed out.\n"); + r = -ETIMEDOUT; + goto free_ib; } + r = 0; for (i = 0; i < rdev->usec_timeout; i++) { tmp = RREG32(scratch); if (tmp == 0xDEADBEEF) diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index d2dd29ab24fa..fb65e6fb5c4f 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -368,11 +368,16 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); return r; } - r = radeon_fence_wait(ib.fence, false); - if (r) { + r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies( + RADEON_USEC_IB_TEST_TIMEOUT)); + if (r < 0) { DRM_ERROR("radeon: fence wait failed (%d).\n", r); return r; + } else if (r == 0) { + DRM_ERROR("radeon: fence wait timed out.\n"); + return -ETIMEDOUT; } + r = 0; for (i = 0; i < rdev->usec_timeout; i++) { tmp = le32_to_cpu(rdev->wb.wb[index/4]); if (tmp == 0xDEADBEEF) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 78a51b3eda10..007be29a0020 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -120,6 +120,7 @@ extern int radeon_mst; */ #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ #define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2) +#define RADEON_USEC_IB_TEST_TIMEOUT 1000000 /* 1s */ /* RADEON_IB_POOL_SIZE must be a power of 2 */ #define RADEON_IB_POOL_SIZE 16 #define RADEON_DEBUGFS_MAX_COMPONENTS 32 @@ -382,6 +383,7 @@ void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring); int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring); void radeon_fence_process(struct radeon_device *rdev, int ring); bool radeon_fence_signaled(struct radeon_fence *fence); +long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, long timeout); int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); int radeon_fence_wait_next(struct radeon_device *rdev, int ring); int radeon_fence_wait_empty(struct radeon_device *rdev, int ring); diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index c4b4f298a283..56482e35d43e 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -551,13 +551,14 @@ static bool radeon_atpx_detect(void) void radeon_register_atpx_handler(void) { bool r; + enum vga_switcheroo_handler_flags_t handler_flags = 0; /* detect if we have any ATPX + 2 VGA in 
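All of the IB-test conversions above share one convention: radeon_fence_wait_timeout() returns the jiffies left on success, 0 on timeout and a negative errno otherwise, so each caller maps 0 to -ETIMEDOUT and collapses any positive remainder back to 0. Factored out for clarity (my_ib_test_wait is an invented helper; the prototype and timeout constant come from the radeon.h hunk above):

#include <linux/jiffies.h>
#include "radeon.h"	/* radeon_fence_wait_timeout(), RADEON_USEC_IB_TEST_TIMEOUT */

static int my_ib_test_wait(struct radeon_fence *fence)
{
	long r = radeon_fence_wait_timeout(fence, false,
			usecs_to_jiffies(RADEON_USEC_IB_TEST_TIMEOUT));

	if (r < 0)
		return r;		/* interrupted or hardware error */
	if (r == 0)
		return -ETIMEDOUT;	/* the test IB never signalled in time */
	return 0;			/* signalled; leftover jiffies not needed */
}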
the system */ r = radeon_atpx_detect(); if (!r) return; - vga_switcheroo_register_handler(&radeon_atpx_handler); + vga_switcheroo_register_handler(&radeon_atpx_handler, handler_flags); } /** diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 340f3f549f29..cfcc099c537d 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -34,6 +34,7 @@ #include "atom.h" #include <linux/pm_runtime.h> +#include <linux/vga_switcheroo.h> static int radeon_dp_handle_hpd(struct drm_connector *connector) { @@ -344,6 +345,11 @@ static void radeon_connector_get_edid(struct drm_connector *connector) else if (radeon_connector->ddc_bus) radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); + } else if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC && + connector->connector_type == DRM_MODE_CONNECTOR_LVDS && + radeon_connector->ddc_bus) { + radeon_connector->edid = drm_get_edid_switcheroo(&radeon_connector->base, + &radeon_connector->ddc_bus->adapter); } else if (radeon_connector->ddc_bus) { radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 298ea1c453c3..a4674bfd979a 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1686,6 +1686,9 @@ void radeon_modeset_fini(struct radeon_device *rdev) radeon_fbdev_fini(rdev); kfree(rdev->mode_info.bios_hardcoded_edid); + /* free i2c buses */ + radeon_i2c_fini(rdev); + if (rdev->mode_info.mode_config_initialized) { radeon_afmt_fini(rdev); drm_kms_helper_poll_fini(rdev->ddev); @@ -1693,8 +1696,6 @@ void radeon_modeset_fini(struct radeon_device *rdev) drm_mode_config_cleanup(rdev->ddev); rdev->mode_info.mode_config_initialized = false; } - /* free i2c buses */ - radeon_i2c_fini(rdev); } static bool is_hdtv_mode(const struct drm_display_mode *mode) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index e266ffc520d2..ccd4ad4ee592 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -34,9 +34,11 @@ #include "radeon_drv.h" #include <drm/drm_pciids.h> +#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/module.h> #include <linux/pm_runtime.h> +#include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include <drm/drm_gem.h> @@ -319,6 +321,23 @@ static int radeon_pci_probe(struct pci_dev *pdev, { int ret; + /* + * Initialize amdkfd before starting radeon. If it was not loaded yet, + * defer radeon probing + */ + ret = radeon_kfd_init(); + if (ret == -EPROBE_DEFER) + return ret; + + /* + * apple-gmux is needed on dual GPU MacBook Pro + * to probe the panel if we're the inactive GPU. 
+ */ + if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) && + apple_gmux_present() && pdev != vga_default_device() && + !vga_switcheroo_handler_flags()) + return -EPROBE_DEFER; + /* Get rid of things like offb */ ret = radeon_kick_out_firmware_fb(pdev); if (ret) @@ -570,8 +589,6 @@ static int __init radeon_init(void) return -EINVAL; } - radeon_kfd_init(); - /* let modprobe override vga console setting */ return drm_pci_init(driver, pdriver); } diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 05815c47b246..7ef075acde9c 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -527,7 +527,7 @@ static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev, } /** - * radeon_fence_wait - wait for a fence to signal + * radeon_fence_wait_timeout - wait for a fence to signal with timeout * * @fence: radeon fence object * @intr: use interruptible sleep @@ -535,12 +535,15 @@ static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev, * Wait for the requested fence to signal (all asics). * @intr selects whether to use interruptable (true) or non-interruptable * (false) sleep when waiting for the fence. - * Returns 0 if the fence has passed, error for all other cases. + * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait + * Returns remaining time if the sequence number has passed, 0 when + * the wait timeout, or an error for all other cases. */ -int radeon_fence_wait(struct radeon_fence *fence, bool intr) +long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout) { uint64_t seq[RADEON_NUM_RINGS] = {}; long r; + int r_sig; /* * This function should not be called on !radeon fences. @@ -552,15 +555,36 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) return fence_wait(&fence->base, intr); seq[fence->ring] = fence->seq; - r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT); - if (r < 0) { + r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout); + if (r <= 0) { return r; } - r = fence_signal(&fence->base); - if (!r) + r_sig = fence_signal(&fence->base); + if (!r_sig) FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); - return 0; + return r; +} + +/** + * radeon_fence_wait - wait for a fence to signal + * + * @fence: radeon fence object + * @intr: use interruptible sleep + * + * Wait for the requested fence to signal (all asics). + * @intr selects whether to use interruptable (true) or non-interruptable + * (false) sleep when waiting for the fence. + * Returns 0 if the fence has passed, error for all other cases. 
+ */ +int radeon_fence_wait(struct radeon_fence *fence, bool intr) +{ + long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); + if (r > 0) { + return 0; + } else { + return r; + } } /** diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 9a4d69e59401..87a9ebb5f58f 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c @@ -132,35 +132,34 @@ static const struct kfd2kgd_calls kfd2kgd = { static const struct kgd2kfd_calls *kgd2kfd; -bool radeon_kfd_init(void) +int radeon_kfd_init(void) { + int ret; + #if defined(CONFIG_HSA_AMD_MODULE) - bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); + int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); kgd2kfd_init_p = symbol_request(kgd2kfd_init); if (kgd2kfd_init_p == NULL) - return false; + return -ENOENT; - if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) { + ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd); + if (ret) { symbol_put(kgd2kfd_init); kgd2kfd = NULL; - - return false; } - return true; #elif defined(CONFIG_HSA_AMD) - if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) { + ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd); + if (ret) kgd2kfd = NULL; - return false; - } - - return true; #else - return false; + ret = -ENOENT; #endif + + return ret; } void radeon_kfd_fini(void) diff --git a/drivers/gpu/drm/radeon/radeon_kfd.h b/drivers/gpu/drm/radeon/radeon_kfd.h index 1103f9082f6b..9df1fea8e971 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.h +++ b/drivers/gpu/drm/radeon/radeon_kfd.h @@ -33,7 +33,7 @@ struct radeon_device; -bool radeon_kfd_init(void); +int radeon_kfd_init(void); void radeon_kfd_fini(void); void radeon_kfd_suspend(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index 7eb1ae758906..566a1a01f6d1 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -810,11 +810,16 @@ int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) goto error; } - r = radeon_fence_wait(fence, false); - if (r) { + r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies( + RADEON_USEC_IB_TEST_TIMEOUT)); + if (r < 0) { DRM_ERROR("radeon: fence wait failed (%d).\n", r); + } else if (r == 0) { + DRM_ERROR("radeon: fence wait timed out.\n"); + r = -ETIMEDOUT; } else { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } error: radeon_fence_unref(&fence); diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index c6b1cbca47fc..12ddcfa82e20 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -522,11 +522,17 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) goto error; } - r = radeon_fence_wait(fence, false); - if (r) { + r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies( + RADEON_USEC_IB_TEST_TIMEOUT)); + if (r < 0) { DRM_ERROR("radeon: fence wait failed (%d).\n", r); goto error; + } else if (r == 0) { + DRM_ERROR("radeon: fence wait timed out.\n"); + r = -ETIMEDOUT; + goto error; } + r = 0; DRM_INFO("ib test on ring %d succeeded\n", ring->idx); error: radeon_fence_unref(&fence); diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 96dcd4a78951..1f10fa0928b4 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -1,6 +1,7 @@ config DRM_RCAR_DU tristate "DRM Support for 
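The radeon_kfd.c and radeon_drv.c hunks above change radeon_kfd_init() from bool to int so that a -EPROBE_DEFER from the amdkfd interface can be propagated out of the PCI probe instead of being lost at module init time. The shape of that propagation, with my_pci_probe() and my_device_init() as invented stand-ins for the surrounding driver:

#include <linux/errno.h>
#include <linux/pci.h>
#include "radeon_kfd.h"	/* int radeon_kfd_init(void), per the hunk above */

static int my_device_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return 0;	/* stand-in for the rest of the probe */
}

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;

	/* Bring up the optional coprocessor interface first; if it is not
	 * ready yet, ask the driver core to retry this probe later. */
	ret = radeon_kfd_init();
	if (ret == -EPROBE_DEFER)
		return ret;
	/* Any other error just means kfd support is unavailable; carry on. */

	return my_device_init(pdev, ent);
}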
R-Car Display Unit" - depends on DRM && ARM && OF + depends on DRM && OF + depends on ARM || ARM64 depends on ARCH_SHMOBILE || COMPILE_TEST select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER @@ -14,14 +15,18 @@ config DRM_RCAR_DU config DRM_RCAR_HDMI bool "R-Car DU HDMI Encoder Support" depends on DRM_RCAR_DU - depends on OF help Enable support for external HDMI encoders. config DRM_RCAR_LVDS bool "R-Car DU LVDS Encoder Support" depends on DRM_RCAR_DU - depends on ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST help - Enable support for the R-Car Display Unit embedded LVDS encoders - (currently only on R8A7790 and R8A7791). + Enable support for the R-Car Display Unit embedded LVDS encoders. + +config DRM_RCAR_VSP + bool "R-Car DU VSP Compositor Support" + depends on DRM_RCAR_DU + depends on VIDEO_RENESAS_VSP1 + help + Enable support to expose the R-Car VSP Compositor as KMS planes. diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index 05de1c4097af..827711e28226 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -11,4 +11,6 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmicon.o \ rcar_du_hdmienc.o rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o +rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o + obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 88a4b706be16..51e9e8ce551a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -1,7 +1,7 @@ /* * rcar_du_crtc.c -- R-Car Display Unit CRTCs * - * Copyright (C) 2013-2014 Renesas Electronics Corporation + * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) * @@ -28,6 +28,7 @@ #include "rcar_du_kms.h" #include "rcar_du_plane.h" #include "rcar_du_regs.h" +#include "rcar_du_vsp.h" static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg) { @@ -150,7 +151,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) /* Signal polarities */ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL) | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL) - | DSMR_DIPM_DE | DSMR_CSPM; + | DSMR_DIPM_DISP | DSMR_CSPM; rcar_du_crtc_write(rcrtc, DSMR, value); /* Display timings */ @@ -207,6 +208,7 @@ plane_format(struct rcar_du_plane *plane) static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc) { struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES]; + struct rcar_du_device *rcdu = rcrtc->group->dev; unsigned int num_planes = 0; unsigned int dptsr_planes; unsigned int hwplanes = 0; @@ -250,6 +252,17 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc) } } + /* If VSP+DU integration is enabled the plane assignment is fixed. */ + if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) { + if (rcdu->info->gen < 3) { + dspr = (rcrtc->index % 2) + 1; + hwplanes = 1 << (rcrtc->index % 2); + } else { + dspr = (rcrtc->index % 2) ? 3 : 1; + hwplanes = 1 << ((rcrtc->index % 2) ? 2 : 0); + } + } + /* Update the planes to display timing and dot clock generator * associations. * @@ -272,6 +285,10 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc) rcar_du_group_restart(rcrtc->group); } + /* Restart the group if plane sources have changed. */ + if (rcrtc->group->need_restart) + rcar_du_group_restart(rcrtc->group); + mutex_unlock(&rcrtc->group->lock); rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? 
DS2PR : DS1PR, @@ -282,26 +299,6 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc) * Page Flip */ -void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, - struct drm_file *file) -{ - struct drm_pending_vblank_event *event; - struct drm_device *dev = rcrtc->crtc.dev; - unsigned long flags; - - /* Destroy the pending vertical blanking event associated with the - * pending page flip, if any, and disable vertical blanking interrupts. - */ - spin_lock_irqsave(&dev->event_lock, flags); - event = rcrtc->event; - if (event && event->base.file_priv == file) { - rcrtc->event = NULL; - event->base.destroy(&event->base); - drm_crtc_vblank_put(&rcrtc->crtc); - } - spin_unlock_irqrestore(&dev->event_lock, flags); -} - static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc) { struct drm_pending_vblank_event *event; @@ -385,6 +382,10 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) rcar_du_group_start_stop(rcrtc->group, true); + /* Enable the VSP compositor. */ + if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) + rcar_du_vsp_enable(rcrtc); + /* Turn vertical blanking interrupt reporting back on. */ drm_crtc_vblank_on(crtc); @@ -418,6 +419,10 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) rcar_du_crtc_wait_page_flip(rcrtc); drm_crtc_vblank_off(crtc); + /* Disable the VSP compositor. */ + if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) + rcar_du_vsp_disable(rcrtc); + /* Select switch sync mode. This stops display operation and configures * the HSYNC and VSYNC signals as inputs. */ @@ -430,6 +435,9 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc) { + if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) + rcar_du_vsp_disable(rcrtc); + rcar_du_crtc_stop(rcrtc); rcar_du_crtc_put(rcrtc); } @@ -438,20 +446,24 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc) { unsigned int i; - if (!rcrtc->enabled) + if (!rcrtc->crtc.state->active) return; rcar_du_crtc_get(rcrtc); rcar_du_crtc_start(rcrtc); /* Commit the planes state. 
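When the VSP1 compositor sources the DU, the plane assignment in rcar_du_crtc_update_planes() is fixed rather than computed, as in the hunk above. The sketch below simply recomputes the dspr/hwplanes pair for each CRTC index and generation; it mirrors the patch logic but is not driver code.

    #include <stdio.h>

    /* Recompute the fixed DSPR value and hardware-plane mask used for a
     * VSP-sourced CRTC, mirroring rcar_du_crtc_update_planes(): Gen2 uses
     * hardware planes 0/1, Gen3 uses planes 0/2. */
    static void vsp_fixed_assignment(unsigned int crtc_index, unsigned int gen,
                                     unsigned int *dspr, unsigned int *hwplanes)
    {
        unsigned int ch = crtc_index % 2;    /* channel within the group */

        if (gen < 3) {
            *dspr = ch + 1;
            *hwplanes = 1u << ch;
        } else {
            *dspr = ch ? 3 : 1;
            *hwplanes = 1u << (ch ? 2 : 0);
        }
    }

    int main(void)
    {
        unsigned int dspr, hwplanes, i;

        for (i = 0; i < 4; i++) {
            vsp_fixed_assignment(i, 3, &dspr, &hwplanes);
            printf("gen3 CRTC %u: dspr=%u hwplanes=0x%02x\n", i, dspr, hwplanes);
        }
        return 0;
    }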
*/ - for (i = 0; i < rcrtc->group->num_planes; ++i) { - struct rcar_du_plane *plane = &rcrtc->group->planes[i]; + if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) { + rcar_du_vsp_enable(rcrtc); + } else { + for (i = 0; i < rcrtc->group->num_planes; ++i) { + struct rcar_du_plane *plane = &rcrtc->group->planes[i]; - if (plane->plane.state->crtc != &rcrtc->crtc) - continue; + if (plane->plane.state->crtc != &rcrtc->crtc) + continue; - rcar_du_plane_setup(plane); + rcar_du_plane_setup(plane); + } } rcar_du_crtc_update_planes(rcrtc); @@ -465,26 +477,17 @@ static void rcar_du_crtc_enable(struct drm_crtc *crtc) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); - if (rcrtc->enabled) - return; - rcar_du_crtc_get(rcrtc); rcar_du_crtc_start(rcrtc); - - rcrtc->enabled = true; } static void rcar_du_crtc_disable(struct drm_crtc *crtc) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); - if (!rcrtc->enabled) - return; - rcar_du_crtc_stop(rcrtc); rcar_du_crtc_put(rcrtc); - rcrtc->enabled = false; rcrtc->outputs = 0; } @@ -511,6 +514,9 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc, rcrtc->event = event; spin_unlock_irqrestore(&dev->event_lock, flags); } + + if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) + rcar_du_vsp_atomic_begin(rcrtc); } static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc, @@ -519,6 +525,9 @@ static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc, struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); rcar_du_crtc_update_planes(rcrtc); + + if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) + rcar_du_vsp_atomic_flush(rcrtc); } static const struct drm_crtc_helper_funcs crtc_helper_funcs = { @@ -567,13 +576,14 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg) int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index) { static const unsigned int mmio_offsets[] = { - DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET + DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET, DU3_REG_OFFSET }; struct rcar_du_device *rcdu = rgrp->dev; struct platform_device *pdev = to_platform_device(rcdu->dev); struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index]; struct drm_crtc *crtc = &rcrtc->crtc; + struct drm_plane *primary; unsigned int irqflags; struct clk *clk; char clk_name[9]; @@ -609,10 +619,13 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index) rcrtc->group = rgrp; rcrtc->mmio_offset = mmio_offsets[index]; rcrtc->index = index; - rcrtc->enabled = false; - ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, - &rgrp->planes[index % 2].plane, + if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) + primary = &rcrtc->vsp->planes[0].plane; + else + primary = &rgrp->planes[index % 2].plane; + + ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary, NULL, &crtc_funcs, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index 4b95d9d08c49..6f08b7e7db06 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -21,6 +21,7 @@ #include <drm/drm_crtc.h> struct rcar_du_group; +struct rcar_du_vsp; /** * struct rcar_du_crtc - the CRTC, representing a DU superposition processor @@ -33,7 +34,6 @@ struct rcar_du_group; * @event: event to post when the pending page flip completes * @flip_wait: wait queue used to signal page flip completion * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC - * @enabled: whether the CRTC is enabled, used to control system resume * @group: CRTC 
group this CRTC belongs to */ struct rcar_du_crtc { @@ -49,9 +49,9 @@ struct rcar_du_crtc { wait_queue_head_t flip_wait; unsigned int outputs; - bool enabled; struct rcar_du_group *group; + struct rcar_du_vsp *vsp; }; #define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc) @@ -67,8 +67,6 @@ enum rcar_du_output { int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index); void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable); -void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, - struct drm_file *file); void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc); void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 40422f6b645e..ed6006bf6bd8 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -1,7 +1,7 @@ /* * rcar_du_drv.c -- R-Car Display Unit DRM driver * - * Copyright (C) 2013-2014 Renesas Electronics Corporation + * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) * @@ -36,6 +36,7 @@ */ static const struct rcar_du_device_info rcar_du_r8a7779_info = { + .gen = 2, .features = 0, .num_crtcs = 2, .routes = { @@ -57,6 +58,7 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = { }; static const struct rcar_du_device_info rcar_du_r8a7790_info = { + .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS, .quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES, @@ -86,6 +88,7 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = { /* M2-W (r8a7791) and M2-N (r8a7793) are identical */ static const struct rcar_du_device_info rcar_du_r8a7791_info = { + .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS, .num_crtcs = 2, @@ -108,6 +111,7 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = { }; static const struct rcar_du_device_info rcar_du_r8a7794_info = { + .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS, .num_crtcs = 2, @@ -129,12 +133,37 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = { .num_lvds = 0, }; +static const struct rcar_du_device_info rcar_du_r8a7795_info = { + .gen = 3, + .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + | RCAR_DU_FEATURE_EXT_CTRL_REGS + | RCAR_DU_FEATURE_VSP1_SOURCE, + .num_crtcs = 4, + .routes = { + /* R8A7795 has one RGB output, one LVDS output and two + * (currently unsupported) HDMI outputs. 
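The per-SoC rcar_du_device_info entries combine a .gen number with feature bits such as RCAR_DU_FEATURE_VSP1_SOURCE, which the driver tests through rcar_du_has(). Below is a self-contained model of that pattern; the flag values match the rcar_du_drv.h hunk later in this diff, while the has_feature() helper is an illustrative stand-in.

    #include <stdbool.h>
    #include <stdio.h>

    /* Flag values as defined in rcar_du_drv.h in this series. */
    #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK    (1 << 0)
    #define RCAR_DU_FEATURE_EXT_CTRL_REGS     (1 << 1)
    #define RCAR_DU_FEATURE_VSP1_SOURCE       (1 << 2)

    struct du_info {
        unsigned int gen;
        unsigned int features;
    };

    /* Illustrative stand-in for rcar_du_has(): test a feature bit. */
    static bool has_feature(const struct du_info *info, unsigned int feature)
    {
        return (info->features & feature) != 0;
    }

    int main(void)
    {
        /* Modelled on rcar_du_r8a7795_info. */
        const struct du_info r8a7795 = {
            .gen = 3,
            .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
                      | RCAR_DU_FEATURE_EXT_CTRL_REGS
                      | RCAR_DU_FEATURE_VSP1_SOURCE,
        };

        if (has_feature(&r8a7795, RCAR_DU_FEATURE_VSP1_SOURCE))
            printf("r8a7795: planes are sourced from the VSP compositor\n");
        return 0;
    }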
+ */ + [RCAR_DU_OUTPUT_DPAD0] = { + .possible_crtcs = BIT(3), + .encoder_type = DRM_MODE_ENCODER_NONE, + .port = 0, + }, + [RCAR_DU_OUTPUT_LVDS0] = { + .possible_crtcs = BIT(0), + .encoder_type = DRM_MODE_ENCODER_LVDS, + .port = 3, + }, + }, + .num_lvds = 1, +}; + static const struct of_device_id rcar_du_of_table[] = { { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info }, { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info }, { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info }, { .compatible = "renesas,du-r8a7793", .data = &rcar_du_r8a7791_info }, { .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info }, + { .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info }, { } }; @@ -144,91 +173,6 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table); * DRM operations */ -static int rcar_du_unload(struct drm_device *dev) -{ - struct rcar_du_device *rcdu = dev->dev_private; - - if (rcdu->fbdev) - drm_fbdev_cma_fini(rcdu->fbdev); - - drm_kms_helper_poll_fini(dev); - drm_mode_config_cleanup(dev); - drm_vblank_cleanup(dev); - - dev->irq_enabled = 0; - dev->dev_private = NULL; - - return 0; -} - -static int rcar_du_load(struct drm_device *dev, unsigned long flags) -{ - struct platform_device *pdev = dev->platformdev; - struct device_node *np = pdev->dev.of_node; - struct rcar_du_device *rcdu; - struct resource *mem; - int ret; - - if (np == NULL) { - dev_err(dev->dev, "no platform data\n"); - return -ENODEV; - } - - rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL); - if (rcdu == NULL) { - dev_err(dev->dev, "failed to allocate private data\n"); - return -ENOMEM; - } - - init_waitqueue_head(&rcdu->commit.wait); - - rcdu->dev = &pdev->dev; - rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data; - rcdu->ddev = dev; - dev->dev_private = rcdu; - - /* I/O resources */ - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem); - if (IS_ERR(rcdu->mmio)) - return PTR_ERR(rcdu->mmio); - - /* Initialize vertical blanking interrupts handling. Start with vblank - * disabled for all CRTCs. 
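Each route above lists the CRTCs allowed to drive an output as a bitmask (BIT(3) ties DPAD0 to CRTC 3, BIT(0) ties LVDS0 to CRTC 0). The sketch below decodes such a mask with ffs(), the same way the Gen3 DEFR8 code later in this diff does; only the masks are taken from the patch, the rest is illustrative.

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    #define BIT(n)    (1U << (n))

    /* Return the index of the lowest CRTC allowed by a possible_crtcs
     * mask, or -1 if the mask is empty.  This mirrors the
     * "crtc = ffs(possible_crtcs) - 1" computation used for Gen3 DPAD0
     * routing. */
    static int first_possible_crtc(unsigned int possible_crtcs)
    {
        return possible_crtcs ? ffs((int)possible_crtcs) - 1 : -1;
    }

    int main(void)
    {
        /* Masks from the r8a7795 route table in this patch. */
        unsigned int dpad0 = BIT(3);
        unsigned int lvds0 = BIT(0);

        printf("DPAD0 -> CRTC %d\n", first_possible_crtc(dpad0));
        printf("LVDS0 -> CRTC %d\n", first_possible_crtc(lvds0));
        return 0;
    }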
- */ - ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1); - if (ret < 0) { - dev_err(&pdev->dev, "failed to initialize vblank\n"); - goto done; - } - - /* DRM/KMS objects */ - ret = rcar_du_modeset_init(rcdu); - if (ret < 0) { - dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret); - goto done; - } - - dev->irq_enabled = 1; - - platform_set_drvdata(pdev, rcdu); - -done: - if (ret) - rcar_du_unload(dev); - - return ret; -} - -static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file) -{ - struct rcar_du_device *rcdu = dev->dev_private; - unsigned int i; - - for (i = 0; i < rcdu->num_crtcs; ++i) - rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file); -} - static void rcar_du_lastclose(struct drm_device *dev) { struct rcar_du_device *rcdu = dev->dev_private; @@ -269,11 +213,7 @@ static const struct file_operations rcar_du_fops = { static struct drm_driver rcar_du_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, - .load = rcar_du_load, - .unload = rcar_du_unload, - .preclose = rcar_du_preclose, .lastclose = rcar_du_lastclose, - .set_busid = drm_platform_set_busid, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = rcar_du_enable_vblank, .disable_vblank = rcar_du_disable_vblank, @@ -333,18 +273,116 @@ static const struct dev_pm_ops rcar_du_pm_ops = { * Platform driver */ -static int rcar_du_probe(struct platform_device *pdev) +static int rcar_du_remove(struct platform_device *pdev) { - return drm_platform_init(&rcar_du_driver, pdev); + struct rcar_du_device *rcdu = platform_get_drvdata(pdev); + struct drm_device *ddev = rcdu->ddev; + + mutex_lock(&ddev->mode_config.mutex); + drm_connector_unplug_all(ddev); + mutex_unlock(&ddev->mode_config.mutex); + + drm_dev_unregister(ddev); + + if (rcdu->fbdev) + drm_fbdev_cma_fini(rcdu->fbdev); + + drm_kms_helper_poll_fini(ddev); + drm_mode_config_cleanup(ddev); + drm_vblank_cleanup(ddev); + + drm_dev_unref(ddev); + + return 0; } -static int rcar_du_remove(struct platform_device *pdev) +static int rcar_du_probe(struct platform_device *pdev) { - struct rcar_du_device *rcdu = platform_get_drvdata(pdev); + struct device_node *np = pdev->dev.of_node; + struct rcar_du_device *rcdu; + struct drm_connector *connector; + struct drm_device *ddev; + struct resource *mem; + int ret; + + if (np == NULL) { + dev_err(&pdev->dev, "no device tree node\n"); + return -ENODEV; + } + + /* Allocate and initialize the DRM and R-Car device structures. */ + rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL); + if (rcdu == NULL) + return -ENOMEM; + + init_waitqueue_head(&rcdu->commit.wait); + + rcdu->dev = &pdev->dev; + rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data; + + ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev); + if (!ddev) + return -ENOMEM; + + drm_dev_set_unique(ddev, dev_name(&pdev->dev)); + + rcdu->ddev = ddev; + ddev->dev_private = rcdu; + + platform_set_drvdata(pdev, rcdu); + + /* I/O resources */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(rcdu->mmio)) { + ret = PTR_ERR(rcdu->mmio); + goto error; + } - drm_put_dev(rcdu->ddev); + /* Initialize vertical blanking interrupts handling. Start with vblank + * disabled for all CRTCs. 
+ */ + ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1); + if (ret < 0) { + dev_err(&pdev->dev, "failed to initialize vblank\n"); + goto error; + } + + /* DRM/KMS objects */ + ret = rcar_du_modeset_init(rcdu); + if (ret < 0) { + dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret); + goto error; + } + + ddev->irq_enabled = 1; + + /* Register the DRM device with the core and the connectors with + * sysfs. + */ + ret = drm_dev_register(ddev, 0); + if (ret) + goto error; + + mutex_lock(&ddev->mode_config.mutex); + drm_for_each_connector(connector, ddev) { + ret = drm_connector_register(connector); + if (ret < 0) + break; + } + mutex_unlock(&ddev->mode_config.mutex); + + if (ret < 0) + goto error; + + DRM_INFO("Device %s probed\n", dev_name(&pdev->dev)); return 0; + +error: + rcar_du_remove(pdev); + + return ret; } static struct platform_driver rcar_du_platform_driver = { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 9f34fc86436a..ed35467d96cf 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -1,7 +1,7 @@ /* * rcar_du_drv.h -- R-Car Display Unit DRM driver * - * Copyright (C) 2013-2014 Renesas Electronics Corporation + * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) * @@ -19,6 +19,7 @@ #include "rcar_du_crtc.h" #include "rcar_du_group.h" +#include "rcar_du_vsp.h" struct clk; struct device; @@ -29,6 +30,7 @@ struct rcar_du_lvdsenc; #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */ #define RCAR_DU_FEATURE_EXT_CTRL_REGS (1 << 1) /* Has extended control registers */ +#define RCAR_DU_FEATURE_VSP1_SOURCE (1 << 2) /* Has inputs from VSP1 */ #define RCAR_DU_QUIRK_ALIGN_128B (1 << 0) /* Align pitches to 128 bytes */ #define RCAR_DU_QUIRK_LVDS_LANES (1 << 1) /* LVDS lanes 1 and 3 inverted */ @@ -51,6 +53,7 @@ struct rcar_du_output_routing { /* * struct rcar_du_device_info - DU model-specific information + * @gen: device generation (2 or 3) * @features: device features (RCAR_DU_FEATURE_*) * @quirks: device quirks (RCAR_DU_QUIRK_*) * @num_crtcs: total number of CRTCs @@ -58,6 +61,7 @@ struct rcar_du_output_routing { * @num_lvds: number of internal LVDS encoders */ struct rcar_du_device_info { + unsigned int gen; unsigned int features; unsigned int quirks; unsigned int num_crtcs; @@ -65,9 +69,10 @@ struct rcar_du_device_info { unsigned int num_lvds; }; -#define RCAR_DU_MAX_CRTCS 3 +#define RCAR_DU_MAX_CRTCS 4 #define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2) #define RCAR_DU_MAX_LVDS 2 +#define RCAR_DU_MAX_VSPS 4 struct rcar_du_device { struct device *dev; @@ -82,6 +87,7 @@ struct rcar_du_device { unsigned int num_crtcs; struct rcar_du_group groups[RCAR_DU_MAX_GROUPS]; + struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS]; struct { struct drm_property *alpha; @@ -90,6 +96,8 @@ struct rcar_du_device { } props; unsigned int dpad0_source; + unsigned int vspd1_sink; + struct rcar_du_lvdsenc *lvds[RCAR_DU_MAX_LVDS]; struct { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index c08700757feb..4e939e41f030 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -89,12 +89,8 @@ static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder, /* The flat panel mode is fixed, just copy it to the adjusted mode. 
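The new RCAR_DU_MAX_CRTCS/RCAR_DU_MAX_GROUPS limits reflect one register group per pair of CRTCs: CRTC i belongs to group i / 2 as channel i % 2, which is why expressions such as rcrtc->index % 2 recur throughout this diff. A tiny sketch of that arithmetic, assuming only the pairing and nothing about the driver structures, follows.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)    (((n) + (d) - 1) / (d))
    #define MAX_CRTCS             4    /* RCAR_DU_MAX_CRTCS after this patch */

    int main(void)
    {
        unsigned int i;

        printf("groups needed for %u CRTCs: %u\n",
               MAX_CRTCS, DIV_ROUND_UP(MAX_CRTCS, 2));

        for (i = 0; i < MAX_CRTCS; i++)
            printf("CRTC %u -> group %u, channel %u\n", i, i / 2, i % 2);
        return 0;
    }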
*/ drm_mode_copy(adjusted_mode, panel_mode); - /* The internal LVDS encoder has a clock frequency operating range of - * 30MHz to 150MHz. Clamp the clock accordingly. - */ if (renc->lvds) - adjusted_mode->clock = clamp(adjusted_mode->clock, - 30000, 150000); + rcar_du_lvdsenc_atomic_check(renc->lvds, adjusted_mode); return 0; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index 8e2ffe025153..33b2fc53da3e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -1,7 +1,7 @@ /* * rcar_du_group.c -- R-Car Display Unit Channels Pair * - * Copyright (C) 2013-2014 Renesas Electronics Corporation + * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) * @@ -44,29 +44,64 @@ void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data) rcar_du_write(rgrp->dev, rgrp->mmio_offset + reg, data); } +static void rcar_du_group_setup_pins(struct rcar_du_group *rgrp) +{ + u32 defr6 = DEFR6_CODE | DEFR6_ODPM12_DISP; + + if (rgrp->num_crtcs > 1) + defr6 |= DEFR6_ODPM22_DISP; + + rcar_du_group_write(rgrp, DEFR6, defr6); +} + static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp) { - u32 defr8 = DEFR8_CODE | DEFR8_DEFE8; + struct rcar_du_device *rcdu = rgrp->dev; + unsigned int possible_crtcs = + rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs; + u32 defr8 = DEFR8_CODE; - /* The DEFR8 register for the first group also controls RGB output - * routing to DPAD0 for DU instances that support it. - */ - if (rgrp->dev->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs > 1 && - rgrp->index == 0) - defr8 |= DEFR8_DRGBS_DU(rgrp->dev->dpad0_source); + if (rcdu->info->gen < 3) { + defr8 |= DEFR8_DEFE8; + + /* On Gen2 the DEFR8 register for the first group also controls + * RGB output routing to DPAD0 and VSPD1 routing to DU0/1/2 for + * DU instances that support it. + */ + if (rgrp->index == 0) { + if (possible_crtcs > 1) + defr8 |= DEFR8_DRGBS_DU(rcdu->dpad0_source); + if (rgrp->dev->vspd1_sink == 2) + defr8 |= DEFR8_VSCS; + } + } else { + /* On Gen3 VSPD routing can't be configured, but DPAD routing + * needs to be set despite having a single option available. + */ + u32 crtc = ffs(possible_crtcs) - 1; + + if (crtc / 2 == rgrp->index) + defr8 |= DEFR8_DRGBS_DU(crtc); + } rcar_du_group_write(rgrp, DEFR8, defr8); } static void rcar_du_group_setup(struct rcar_du_group *rgrp) { + struct rcar_du_device *rcdu = rgrp->dev; + /* Enable extended features */ rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE); - rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G); - rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3); - rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE); + if (rcdu->info->gen < 3) { + rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G); + rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3); + rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE); + } rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5); + rcar_du_group_setup_pins(rgrp); + if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_EXT_CTRL_REGS)) { rcar_du_group_setup_defr8(rgrp); @@ -82,6 +117,9 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp) DIDSR_PDCS_CLK(0, 0)); } + if (rcdu->info->gen >= 3) + rcar_du_group_write(rgrp, DEFR10, DEFR10_CODE | DEFR10_DEFE10); + /* Use DS1PR and DS2PR to configure planes priorities and connects the * superposition 0 to DU0 pins. DU1 pins will be configured dynamically. 
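rcar_du_group_setup_defr8() now keeps the programmable Gen2 DPAD0/VSPD1 routing while deriving the Gen3 DPAD0 source directly from the output's possible_crtcs mask. The sketch below models only that source-selection decision, not the register encoding; the function name and return convention are invented for illustration.

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    /* Decide which CRTC feeds the DPAD0 (RGB) output, following the
     * rcar_du_group_setup_defr8() rework: Gen2 routing is programmable
     * whenever the possible_crtcs mask allows it, Gen3 routing is fixed
     * by the mask.  Returns a CRTC index, or -1 when no CRTC may drive
     * the output. */
    static int dpad0_source_crtc(unsigned int gen, unsigned int possible_crtcs,
                                 unsigned int programmed_source)
    {
        if (!possible_crtcs)
            return -1;

        /* Same condition as the Gen2 branch in the patch. */
        if (gen < 3 && possible_crtcs > 1)
            return (int)programmed_source;

        return ffs((int)possible_crtcs) - 1;
    }

    int main(void)
    {
        printf("gen2, CRTCs 0-2 allowed, programmed source 1 -> CRTC %d\n",
               dpad0_source_crtc(2, 0x7, 1));
        printf("gen3, only CRTC 3 allowed -> CRTC %d\n",
               dpad0_source_crtc(3, 1u << 3, 0));
        return 0;
    }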
*/ @@ -158,21 +196,23 @@ void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) void rcar_du_group_restart(struct rcar_du_group *rgrp) { + rgrp->need_restart = false; + __rcar_du_group_start_stop(rgrp, false); __rcar_du_group_start_stop(rgrp, true); } -static int rcar_du_set_dpad0_routing(struct rcar_du_device *rcdu) +int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu) { int ret; if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_EXT_CTRL_REGS)) return 0; - /* RGB output routing to DPAD0 is configured in the DEFR8 register of - * the first group. As this function can be called with the DU0 and DU1 - * CRTCs disabled, we need to enable the first group clock before - * accessing the register. + /* RGB output routing to DPAD0 and VSP1D routing to DU0/1/2 are + * configured in the DEFR8 register of the first group. As this function + * can be called with the DU0 and DU1 CRTCs disabled, we need to enable + * the first group clock before accessing the register. */ ret = clk_prepare_enable(rcdu->crtcs[0].clock); if (ret < 0) @@ -203,5 +243,5 @@ int rcar_du_group_set_routing(struct rcar_du_group *rgrp) rcar_du_group_write(rgrp, DORCR, dorcr); - return rcar_du_set_dpad0_routing(rgrp->dev); + return rcar_du_set_dpad0_vsp1_routing(rgrp->dev); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h index d7318e1a6b00..5e3adc6b31b5 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h @@ -32,6 +32,7 @@ struct rcar_du_device; * @dptsr_planes: bitmask of planes driven by dot-clock and timing generator 1 * @num_planes: number of planes in the group * @planes: planes handled by the group + * @need_restart: the group needs to be restarted due to a configuration change */ struct rcar_du_group { struct rcar_du_device *dev; @@ -47,6 +48,7 @@ struct rcar_du_group { unsigned int num_planes; struct rcar_du_plane planes[RCAR_DU_NUM_KMS_PLANES]; + bool need_restart; }; u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg); @@ -58,4 +60,6 @@ void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start); void rcar_du_group_restart(struct rcar_du_group *rgrp); int rcar_du_group_set_routing(struct rcar_du_group *rgrp); +int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu); + #endif /* __RCAR_DU_GROUP_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c index a37b6e2fe51a..6c927144b5c9 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c @@ -55,12 +55,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = { .best_encoder = rcar_du_connector_best_encoder, }; -static void rcar_du_hdmi_connector_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} - static enum drm_connector_status rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force) { @@ -79,7 +73,7 @@ static const struct drm_connector_funcs connector_funcs = { .reset = drm_atomic_helper_connector_reset, .detect = rcar_du_hdmi_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = rcar_du_hdmi_connector_destroy, + .destroy = drm_connector_cleanup, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; @@ -108,9 +102,6 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu, return ret; 
drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_connector_register(connector); - if (ret < 0) - return ret; connector->dpms = DRM_MODE_DPMS_OFF; drm_object_property_set_value(&connector->base, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c index 2567efcbee36..461662d231e2 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c @@ -71,12 +71,9 @@ static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; const struct drm_display_mode *mode = &crtc_state->mode; - /* The internal LVDS encoder has a clock frequency operating range of - * 30MHz to 150MHz. Clamp the clock accordingly. - */ if (hdmienc->renc->lvds) - adjusted_mode->clock = clamp(adjusted_mode->clock, - 30000, 150000); + rcar_du_lvdsenc_atomic_check(hdmienc->renc->lvds, + adjusted_mode); if (sfuncs->mode_fixup == NULL) return 0; @@ -134,12 +131,19 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu, /* Locate the slave I2C device and driver. */ i2c_slave = of_find_i2c_device_by_node(np); - if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) + if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) { + dev_dbg(rcdu->dev, + "can't get I2C slave for %s, deferring probe\n", + of_node_full_name(np)); return -EPROBE_DEFER; + } hdmienc->dev = &i2c_slave->dev; if (hdmienc->dev->driver == NULL) { + dev_dbg(rcdu->dev, + "I2C slave %s not probed yet, deferring probe\n", + dev_name(hdmienc->dev)); ret = -EPROBE_DEFER; goto error; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 43bce69d8560..24725bf859b4 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -1,7 +1,7 @@ /* * rcar_du_kms.c -- R-Car Display Unit Mode Setting * - * Copyright (C) 2013-2014 Renesas Electronics Corporation + * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) * @@ -28,6 +28,7 @@ #include "rcar_du_kms.h" #include "rcar_du_lvdsenc.h" #include "rcar_du_regs.h" +#include "rcar_du_vsp.h" /* ----------------------------------------------------------------------------- * Format helpers @@ -89,13 +90,44 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { - /* In YUV 4:2:2, only NV16 is supported (NV61 isn't) */ .fourcc = DRM_FORMAT_NV16, .bpp = 16, .planes = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, + /* The following formats are not supported on Gen2 and thus have no + * associated .pnmr or .edf settings. 
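The format table gains Gen3-only YUV formats that carry no PnMR/DDCR4 settings, and lookups go through rcar_du_format_info() by fourcc. A self-contained sketch of that table lookup follows; the fourcc macro and the reduced descriptor are illustrative, only the bpp and plane counts come from the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Little-endian fourcc, in the spirit of drm_fourcc.h. */
    #define FOURCC(a, b, c, d) \
        ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
         ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

    struct format_info {
        uint32_t fourcc;
        unsigned int bpp;
        unsigned int planes;
    };

    /* Subset of the rcar_du_kms.c table; NV61 and YUV420 are among the
     * Gen3-only formats added by this patch. */
    static const struct format_info formats[] = {
        { FOURCC('N', 'V', '1', '6'), 16, 2 },
        { FOURCC('N', 'V', '6', '1'), 16, 2 },
        { FOURCC('Y', 'U', '1', '2'), 12, 3 },    /* YUV420 */
    };

    static const struct format_info *format_info(uint32_t fourcc)
    {
        unsigned int i;

        for (i = 0; i < sizeof(formats) / sizeof(formats[0]); ++i) {
            if (formats[i].fourcc == fourcc)
                return &formats[i];
        }
        return NULL;
    }

    int main(void)
    {
        const struct format_info *info = format_info(FOURCC('N', 'V', '6', '1'));

        if (info)
            printf("NV61: %u bpp, %u planes\n", info->bpp, info->planes);
        return 0;
    }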
+ */ + { + .fourcc = DRM_FORMAT_NV61, + .bpp = 16, + .planes = 2, + }, { + .fourcc = DRM_FORMAT_YUV420, + .bpp = 12, + .planes = 3, + }, { + .fourcc = DRM_FORMAT_YVU420, + .bpp = 12, + .planes = 3, + }, { + .fourcc = DRM_FORMAT_YUV422, + .bpp = 16, + .planes = 3, + }, { + .fourcc = DRM_FORMAT_YVU422, + .bpp = 16, + .planes = 3, + }, { + .fourcc = DRM_FORMAT_YUV444, + .bpp = 24, + .planes = 3, + }, { + .fourcc = DRM_FORMAT_YVU444, + .bpp = 24, + .planes = 3, + }, }; const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc) @@ -143,6 +175,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, unsigned int max_pitch; unsigned int align; unsigned int bpp; + unsigned int i; format = rcar_du_format_info(mode_cmd->pixel_format); if (format == NULL) { @@ -155,7 +188,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, * The pitch and alignment constraints are expressed in pixels on the * hardware side and in bytes in the DRM API. */ - bpp = format->planes == 2 ? 1 : format->bpp / 8; + bpp = format->planes == 1 ? format->bpp / 8 : 1; max_pitch = 4096 * bpp; if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B)) @@ -170,8 +203,8 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, return ERR_PTR(-EINVAL); } - if (format->planes == 2) { - if (mode_cmd->pitches[1] != mode_cmd->pitches[0]) { + for (i = 1; i < format->planes; ++i) { + if (mode_cmd->pitches[i] != mode_cmd->pitches[0]) { dev_dbg(dev->dev, "luma and chroma pitches do not match\n"); return ERR_PTR(-EINVAL); @@ -192,252 +225,20 @@ static void rcar_du_output_poll_changed(struct drm_device *dev) * Atomic Check and Update */ -/* - * Atomic hardware plane allocator - * - * The hardware plane allocator is solely based on the atomic plane states - * without keeping any external state to avoid races between .atomic_check() - * and .atomic_commit(). - * - * The core idea is to avoid using a free planes bitmask that would need to be - * shared between check and commit handlers with a collective knowledge based on - * the allocated hardware plane(s) for each KMS plane. The allocator then loops - * over all plane states to compute the free planes bitmask, allocates hardware - * planes based on that bitmask, and stores the result back in the plane states. - * - * For this to work we need to access the current state of planes not touched by - * the atomic update. To ensure that it won't be modified, we need to lock all - * planes using drm_atomic_get_plane_state(). This effectively serializes atomic - * updates from .atomic_check() up to completion (when swapping the states if - * the check step has succeeded) or rollback (when freeing the states if the - * check step has failed). - * - * Allocation is performed in the .atomic_check() handler and applied - * automatically when the core swaps the old and new states. - */ - -static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane, - struct rcar_du_plane_state *state) -{ - const struct rcar_du_format_info *cur_format; - - cur_format = to_rcar_plane_state(plane->plane.state)->format; - - /* Lowering the number of planes doesn't strictly require reallocation - * as the extra hardware plane will be freed when committing, but doing - * so could lead to more fragmentation. 
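rcar_du_fb_create() now checks every chroma pitch against the luma pitch and derives the byte factor as format->bpp / 8 for single-plane formats and 1 otherwise, since the hardware counts pitches in pixels while the DRM API uses bytes. The sketch below reproduces those checks under simplified assumptions; the alignment argument is a placeholder (the real value depends on the ALIGN_128B quirk) and the helper is not the driver function.

    #include <stdbool.h>
    #include <stdio.h>

    struct fmt {
        unsigned int bpp;       /* bits per pixel (first plane) */
        unsigned int planes;
    };

    /* Validate framebuffer pitches the way rcar_du_fb_create() does:
     * pitches are bytes in the API but pixels in the hardware, so the
     * 4096-pixel limit is scaled by a per-format byte factor, and all
     * chroma pitches must match the luma pitch. */
    static bool pitches_ok(const struct fmt *fmt, const unsigned int *pitches,
                           unsigned int align)
    {
        unsigned int bpp = fmt->planes == 1 ? fmt->bpp / 8 : 1;
        unsigned int max_pitch = 4096 * bpp;
        unsigned int i;

        if (pitches[0] > max_pitch || pitches[0] % align)
            return false;

        for (i = 1; i < fmt->planes; ++i) {
            if (pitches[i] != pitches[0])
                return false;    /* luma and chroma pitches must match */
        }
        return true;
    }

    int main(void)
    {
        const struct fmt yuv420 = { .bpp = 12, .planes = 3 };
        unsigned int pitches[3] = { 1280, 1280, 1280 };

        printf("pitches %s\n",
               pitches_ok(&yuv420, pitches, 16) ? "ok" : "rejected");
        return 0;
    }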
- */ - return !cur_format || cur_format->planes != state->format->planes; -} - -static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state) -{ - unsigned int mask; - - if (state->hwindex == -1) - return 0; - - mask = 1 << state->hwindex; - if (state->format->planes == 2) - mask |= 1 << ((state->hwindex + 1) % 8); - - return mask; -} - -static int rcar_du_plane_hwalloc(unsigned int num_planes, unsigned int free) -{ - unsigned int i; - - for (i = 0; i < RCAR_DU_NUM_HW_PLANES; ++i) { - if (!(free & (1 << i))) - continue; - - if (num_planes == 1 || free & (1 << ((i + 1) % 8))) - break; - } - - return i == RCAR_DU_NUM_HW_PLANES ? -EBUSY : i; -} - static int rcar_du_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { struct rcar_du_device *rcdu = dev->dev_private; - unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, }; - unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, }; - bool needs_realloc = false; - unsigned int groups = 0; - unsigned int i; int ret; ret = drm_atomic_helper_check(dev, state); if (ret < 0) return ret; - /* Check if hardware planes need to be reallocated. */ - for (i = 0; i < dev->mode_config.num_total_plane; ++i) { - struct rcar_du_plane_state *plane_state; - struct rcar_du_plane *plane; - unsigned int index; - - if (!state->planes[i]) - continue; - - plane = to_rcar_plane(state->planes[i]); - plane_state = to_rcar_plane_state(state->plane_states[i]); - - dev_dbg(rcdu->dev, "%s: checking plane (%u,%u)\n", __func__, - plane->group->index, plane - plane->group->planes); - - /* If the plane is being disabled we don't need to go through - * the full reallocation procedure. Just mark the hardware - * plane(s) as freed. - */ - if (!plane_state->format) { - dev_dbg(rcdu->dev, "%s: plane is being disabled\n", - __func__); - index = plane - plane->group->planes; - group_freed_planes[plane->group->index] |= 1 << index; - plane_state->hwindex = -1; - continue; - } - - /* If the plane needs to be reallocated mark it as such, and - * mark the hardware plane(s) as free. - */ - if (rcar_du_plane_needs_realloc(plane, plane_state)) { - dev_dbg(rcdu->dev, "%s: plane needs reallocation\n", - __func__); - groups |= 1 << plane->group->index; - needs_realloc = true; - - index = plane - plane->group->planes; - group_freed_planes[plane->group->index] |= 1 << index; - plane_state->hwindex = -1; - } - } - - if (!needs_realloc) + if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) return 0; - /* Grab all plane states for the groups that need reallocation to ensure - * locking and avoid racy updates. This serializes the update operation, - * but there's not much we can do about it as that's the hardware - * design. - * - * Compute the used planes mask for each group at the same time to avoid - * looping over the planes separately later. - */ - while (groups) { - unsigned int index = ffs(groups) - 1; - struct rcar_du_group *group = &rcdu->groups[index]; - unsigned int used_planes = 0; - - dev_dbg(rcdu->dev, "%s: finding free planes for group %u\n", - __func__, index); - - for (i = 0; i < group->num_planes; ++i) { - struct rcar_du_plane *plane = &group->planes[i]; - struct rcar_du_plane_state *plane_state; - struct drm_plane_state *s; - - s = drm_atomic_get_plane_state(state, &plane->plane); - if (IS_ERR(s)) - return PTR_ERR(s); - - /* If the plane has been freed in the above loop its - * hardware planes must not be added to the used planes - * bitmask. 
However, the current state doesn't reflect - * the free state yet, as we've modified the new state - * above. Use the local freed planes list to check for - * that condition instead. - */ - if (group_freed_planes[index] & (1 << i)) { - dev_dbg(rcdu->dev, - "%s: plane (%u,%u) has been freed, skipping\n", - __func__, plane->group->index, - plane - plane->group->planes); - continue; - } - - plane_state = to_rcar_plane_state(plane->plane.state); - used_planes |= rcar_du_plane_hwmask(plane_state); - - dev_dbg(rcdu->dev, - "%s: plane (%u,%u) uses %u hwplanes (index %d)\n", - __func__, plane->group->index, - plane - plane->group->planes, - plane_state->format ? - plane_state->format->planes : 0, - plane_state->hwindex); - } - - group_free_planes[index] = 0xff & ~used_planes; - groups &= ~(1 << index); - - dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n", - __func__, index, group_free_planes[index]); - } - - /* Reallocate hardware planes for each plane that needs it. */ - for (i = 0; i < dev->mode_config.num_total_plane; ++i) { - struct rcar_du_plane_state *plane_state; - struct rcar_du_plane *plane; - unsigned int crtc_planes; - unsigned int free; - int idx; - - if (!state->planes[i]) - continue; - - plane = to_rcar_plane(state->planes[i]); - plane_state = to_rcar_plane_state(state->plane_states[i]); - - dev_dbg(rcdu->dev, "%s: allocating plane (%u,%u)\n", __func__, - plane->group->index, plane - plane->group->planes); - - /* Skip planes that are being disabled or don't need to be - * reallocated. - */ - if (!plane_state->format || - !rcar_du_plane_needs_realloc(plane, plane_state)) - continue; - - /* Try to allocate the plane from the free planes currently - * associated with the target CRTC to avoid restarting the CRTC - * group and thus minimize flicker. If it fails fall back to - * allocating from all free planes. - */ - crtc_planes = to_rcar_crtc(plane_state->state.crtc)->index % 2 - ? plane->group->dptsr_planes - : ~plane->group->dptsr_planes; - free = group_free_planes[plane->group->index]; - - idx = rcar_du_plane_hwalloc(plane_state->format->planes, - free & crtc_planes); - if (idx < 0) - idx = rcar_du_plane_hwalloc(plane_state->format->planes, - free); - if (idx < 0) { - dev_dbg(rcdu->dev, "%s: no available hardware plane\n", - __func__); - return idx; - } - - dev_dbg(rcdu->dev, "%s: allocated %u hwplanes (index %u)\n", - __func__, plane_state->format->planes, idx); - - plane_state->hwindex = idx; - - group_free_planes[plane->group->index] &= - ~rcar_du_plane_hwmask(plane_state); - - dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n", - __func__, plane->group->index, - group_free_planes[plane->group->index]); - } - - return 0; + return rcar_du_atomic_check_planes(dev, state); } struct rcar_du_commit { @@ -456,7 +257,7 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit) /* Apply the atomic update. */ drm_atomic_helper_commit_modeset_disables(dev, old_state); drm_atomic_helper_commit_modeset_enables(dev, old_state); - drm_atomic_helper_commit_planes(dev, old_state, false); + drm_atomic_helper_commit_planes(dev, old_state, true); drm_atomic_helper_wait_for_vblanks(dev, old_state); @@ -775,14 +576,34 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) rgrp->num_crtcs = min(rcdu->num_crtcs - 2 * i, 2U); /* If we have more than one CRTCs in this group pre-associate - * planes 0-3 with CRTC 0 and planes 4-7 with CRTC 1 to minimize - * flicker occurring when the association is changed. 
+ * the low-order planes with CRTC 0 and the high-order planes + * with CRTC 1 to minimize flicker occurring when the + * association is changed. */ - rgrp->dptsr_planes = rgrp->num_crtcs > 1 ? 0xf0 : 0; + rgrp->dptsr_planes = rgrp->num_crtcs > 1 + ? (rcdu->info->gen >= 3 ? 0x04 : 0xf0) + : 0; - ret = rcar_du_planes_init(rgrp); - if (ret < 0) - return ret; + if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) { + ret = rcar_du_planes_init(rgrp); + if (ret < 0) + return ret; + } + } + + /* Initialize the compositors. */ + if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) { + for (i = 0; i < rcdu->num_crtcs; ++i) { + struct rcar_du_vsp *vsp = &rcdu->vsps[i]; + + vsp->index = i; + vsp->dev = rcdu; + rcdu->crtcs[i].vsp = vsp; + + ret = rcar_du_vsp_init(vsp); + if (ret < 0) + return ret; + } } /* Create the CRTCs. */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c index 0c43032fc693..e905f5da7aaa 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c @@ -62,12 +62,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = { .best_encoder = rcar_du_connector_best_encoder, }; -static void rcar_du_lvds_connector_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} - static enum drm_connector_status rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force) { @@ -79,7 +73,7 @@ static const struct drm_connector_funcs connector_funcs = { .reset = drm_atomic_helper_connector_reset, .detect = rcar_du_lvds_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = rcar_du_lvds_connector_destroy, + .destroy = drm_connector_cleanup, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; @@ -117,9 +111,6 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, return ret; drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_connector_register(connector); - if (ret < 0) - return ret; connector->dpms = DRM_MODE_DPMS_OFF; drm_object_property_set_value(&connector->base, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c index 85043c5bad03..ef3a50321ecc 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c @@ -38,35 +38,106 @@ static void rcar_lvds_write(struct rcar_du_lvdsenc *lvds, u32 reg, u32 data) iowrite32(data, lvds->mmio + reg); } -static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, - struct rcar_du_crtc *rcrtc) +static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds, + struct rcar_du_crtc *rcrtc) { const struct drm_display_mode *mode = &rcrtc->crtc.mode; unsigned int freq = mode->clock; u32 lvdcr0; - u32 lvdhcr; u32 pllcr; - int ret; - - if (lvds->enabled) - return 0; - - ret = clk_prepare_enable(lvds->clock); - if (ret < 0) - return ret; /* PLL clock configuration */ - if (freq <= 38000) + if (freq < 39000) pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M; - else if (freq <= 60000) + else if (freq < 61000) pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M; - else if (freq <= 121000) + else if (freq < 121000) pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M; else pllcr = LVDPLLCR_PLLDLYCNT_150M; rcar_lvds_write(lvds, LVDPLLCR, pllcr); + /* Select the input, hardcode mode 0, enable LVDS operation and 
turn + * bias circuitry on. + */ + lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN; + if (rcrtc->index == 2) + lvdcr0 |= LVDCR0_DUSEL; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + + /* Turn all the channels on. */ + rcar_lvds_write(lvds, LVDCR1, + LVDCR1_CHSTBY_GEN2(3) | LVDCR1_CHSTBY_GEN2(2) | + LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) | + LVDCR1_CLKSTBY_GEN2); + + /* Turn the PLL on, wait for the startup delay, and turn the output + * on. + */ + lvdcr0 |= LVDCR0_PLLON; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + + usleep_range(100, 150); + + lvdcr0 |= LVDCR0_LVRES; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); +} + +static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds, + struct rcar_du_crtc *rcrtc) +{ + const struct drm_display_mode *mode = &rcrtc->crtc.mode; + unsigned int freq = mode->clock; + u32 lvdcr0; + u32 pllcr; + + /* PLL clock configuration */ + if (freq < 42000) + pllcr = LVDPLLCR_PLLDIVCNT_42M; + else if (freq < 85000) + pllcr = LVDPLLCR_PLLDIVCNT_85M; + else if (freq < 128000) + pllcr = LVDPLLCR_PLLDIVCNT_128M; + else + pllcr = LVDPLLCR_PLLDIVCNT_148M; + + rcar_lvds_write(lvds, LVDPLLCR, pllcr); + + /* Turn the PLL on, set it to LVDS normal mode, wait for the startup + * delay and turn the output on. + */ + lvdcr0 = LVDCR0_PLLON; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + + lvdcr0 |= LVDCR0_PWD; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + + usleep_range(100, 150); + + lvdcr0 |= LVDCR0_LVRES; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + + /* Turn all the channels on. */ + rcar_lvds_write(lvds, LVDCR1, + LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) | + LVDCR1_CHSTBY_GEN3(1) | LVDCR1_CHSTBY_GEN3(0) | + LVDCR1_CLKSTBY_GEN3); +} + +static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, + struct rcar_du_crtc *rcrtc) +{ + u32 lvdhcr; + int ret; + + if (lvds->enabled) + return 0; + + ret = clk_prepare_enable(lvds->clock); + if (ret < 0) + return ret; + /* Hardcode the channels and control signals routing for now. * * HSYNC -> CTRL0 @@ -87,30 +158,14 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, rcar_lvds_write(lvds, LVDCHCR, lvdhcr); - /* Select the input, hardcode mode 0, enable LVDS operation and turn - * bias circuitry on. - */ - lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN; - if (rcrtc->index == 2) - lvdcr0 |= LVDCR0_DUSEL; - rcar_lvds_write(lvds, LVDCR0, lvdcr0); - - /* Turn all the channels on. */ - rcar_lvds_write(lvds, LVDCR1, LVDCR1_CHSTBY(3) | LVDCR1_CHSTBY(2) | - LVDCR1_CHSTBY(1) | LVDCR1_CHSTBY(0) | LVDCR1_CLKSTBY); - - /* Turn the PLL on, wait for the startup delay, and turn the output - * on. - */ - lvdcr0 |= LVDCR0_PLLEN; - rcar_lvds_write(lvds, LVDCR0, lvdcr0); - - usleep_range(100, 150); - - lvdcr0 |= LVDCR0_LVRES; - rcar_lvds_write(lvds, LVDCR0, lvdcr0); + /* Perform generation-specific initialization. */ + if (lvds->dev->info->gen < 3) + rcar_du_lvdsenc_start_gen2(lvds, rcrtc); + else + rcar_du_lvdsenc_start_gen3(lvds, rcrtc); lvds->enabled = true; + return 0; } @@ -140,6 +195,21 @@ int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds, struct drm_crtc *crtc, return -EINVAL; } +void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds, + struct drm_display_mode *mode) +{ + struct rcar_du_device *rcdu = lvds->dev; + + /* The internal LVDS encoder has a restricted clock frequency operating + * range (30MHz to 150MHz on Gen2, 25.175MHz to 148.5MHz on Gen3). Clamp + * the clock accordingly. 
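The LVDS encoder start-up is now split per generation, with different PLL bands (below 39/61/121 MHz on Gen2, below 42/85/128 MHz on Gen3) and the operating ranges used by the new rcar_du_lvdsenc_atomic_check() clamp. The sketch below reproduces only that band and clamp arithmetic on a pixel clock in kHz; the band indices are illustrative, not register values.

    #include <stdio.h>

    #define CLAMP(v, lo, hi)  ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

    /* Clamp a pixel clock (kHz) to the LVDS operating range, as
     * rcar_du_lvdsenc_atomic_check() does: 30-150 MHz on Gen2,
     * 25.175-148.5 MHz on Gen3. */
    static int lvds_clamp_clock(unsigned int gen, int clock)
    {
        return gen < 3 ? CLAMP(clock, 30000, 150000)
                       : CLAMP(clock, 25175, 148500);
    }

    /* Pick a PLL band (0 = lowest) from the clamped clock, mirroring the
     * thresholds in rcar_du_lvdsenc_start_gen2()/_gen3(). */
    static int lvds_pll_band(unsigned int gen, int clock)
    {
        if (gen < 3)
            return clock < 39000 ? 0 : clock < 61000 ? 1 :
                   clock < 121000 ? 2 : 3;
        return clock < 42000 ? 0 : clock < 85000 ? 1 :
               clock < 128000 ? 2 : 3;
    }

    int main(void)
    {
        int clock = lvds_clamp_clock(3, 20000);    /* too slow: clamped up */

        printf("gen3: clock=%d kHz, PLL band %d\n",
               clock, lvds_pll_band(3, clock));
        return 0;
    }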
+ */ + if (rcdu->info->gen < 3) + mode->clock = clamp(mode->clock, 30000, 150000); + else + mode->clock = clamp(mode->clock, 25175, 148500); +} + static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds, struct platform_device *pdev) { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h index 9a6001c07303..dfdba746edf4 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h @@ -30,6 +30,8 @@ enum rcar_lvds_input { int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu); int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds, struct drm_crtc *crtc, bool enable); +void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds, + struct drm_display_mode *mode); #else static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu) { @@ -40,6 +42,10 @@ static inline int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds, { return 0; } +static inline void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds, + struct drm_display_mode *mode) +{ +} #endif #endif /* __RCAR_DU_LVDSENC_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index c3ed9522c0e1..8460ae1ffa4b 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -1,7 +1,7 @@ /* * rcar_du_plane.c -- R-Car Display Unit Planes * - * Copyright (C) 2013-2014 Renesas Electronics Corporation + * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) * @@ -12,6 +12,7 @@ */ #include <drm/drmP.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> @@ -20,21 +21,300 @@ #include <drm/drm_plane_helper.h> #include "rcar_du_drv.h" +#include "rcar_du_group.h" #include "rcar_du_kms.h" #include "rcar_du_plane.h" #include "rcar_du_regs.h" -#define RCAR_DU_COLORKEY_NONE (0 << 24) -#define RCAR_DU_COLORKEY_SOURCE (1 << 24) -#define RCAR_DU_COLORKEY_MASK (1 << 24) +/* ----------------------------------------------------------------------------- + * Atomic hardware plane allocator + * + * The hardware plane allocator is solely based on the atomic plane states + * without keeping any external state to avoid races between .atomic_check() + * and .atomic_commit(). + * + * The core idea is to avoid using a free planes bitmask that would need to be + * shared between check and commit handlers with a collective knowledge based on + * the allocated hardware plane(s) for each KMS plane. The allocator then loops + * over all plane states to compute the free planes bitmask, allocates hardware + * planes based on that bitmask, and stores the result back in the plane states. + * + * For this to work we need to access the current state of planes not touched by + * the atomic update. To ensure that it won't be modified, we need to lock all + * planes using drm_atomic_get_plane_state(). This effectively serializes atomic + * updates from .atomic_check() up to completion (when swapping the states if + * the check step has succeeded) or rollback (when freeing the states if the + * check step has failed). + * + * Allocation is performed in the .atomic_check() handler and applied + * automatically when the core swaps the old and new states. 
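The comment above summarises the allocator: derive a free-planes bitmask purely from the locked plane states, allocate from it, and store the result back. A self-contained model of the mask-building step, using the second-plane-is-(hwindex + 1) % 8 rule from rcar_du_plane_hwmask(), follows; the data layout is simplified and not the driver's.

    #include <stdio.h>

    #define NUM_HW_PLANES    8

    struct plane_state {
        int hwindex;            /* -1 when no hardware plane is allocated */
        unsigned int planes;    /* 1 or 2 hardware planes for this format */
    };

    /* Mask of hardware planes used by one KMS plane: the second plane of
     * a two-plane format wraps around modulo 8, as in
     * rcar_du_plane_hwmask(). */
    static unsigned int hwmask(const struct plane_state *s)
    {
        unsigned int mask;

        if (s->hwindex == -1)
            return 0;

        mask = 1u << s->hwindex;
        if (s->planes == 2)
            mask |= 1u << ((s->hwindex + 1) % NUM_HW_PLANES);
        return mask;
    }

    int main(void)
    {
        const struct plane_state states[] = {
            { .hwindex = 0, .planes = 2 },     /* occupies planes 0 and 1 */
            { .hwindex = 4, .planes = 2 },     /* occupies planes 4 and 5 */
            { .hwindex = -1, .planes = 1 },    /* disabled, occupies nothing */
        };
        unsigned int used = 0, i;

        for (i = 0; i < sizeof(states) / sizeof(states[0]); ++i)
            used |= hwmask(&states[i]);

        printf("used mask 0x%02x, free mask 0x%02x\n", used, 0xffu & ~used);
        return 0;
    }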
+ */ + +static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane, + struct rcar_du_plane_state *new_state) +{ + struct rcar_du_plane_state *cur_state; + + cur_state = to_rcar_plane_state(plane->plane.state); + + /* Lowering the number of planes doesn't strictly require reallocation + * as the extra hardware plane will be freed when committing, but doing + * so could lead to more fragmentation. + */ + if (!cur_state->format || + cur_state->format->planes != new_state->format->planes) + return true; + + /* Reallocate hardware planes if the source has changed. */ + if (cur_state->source != new_state->source) + return true; -static u32 rcar_du_plane_read(struct rcar_du_group *rgrp, - unsigned int index, u32 reg) + return false; +} + +static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state) +{ + unsigned int mask; + + if (state->hwindex == -1) + return 0; + + mask = 1 << state->hwindex; + if (state->format->planes == 2) + mask |= 1 << ((state->hwindex + 1) % 8); + + return mask; +} + +/* + * The R8A7790 DU can source frames directly from the VSP1 devices VSPD0 and + * VSPD1. VSPD0 feeds DU0/1 plane 0, and VSPD1 feeds either DU2 plane 0 or + * DU0/1 plane 1. + * + * Allocate the correct fixed plane when sourcing frames from VSPD0 or VSPD1, + * and allocate planes in reverse index order otherwise to ensure maximum + * availability of planes 0 and 1. + * + * The caller is responsible for ensuring that the requested source is + * compatible with the DU revision. + */ +static int rcar_du_plane_hwalloc(struct rcar_du_plane *plane, + struct rcar_du_plane_state *state, + unsigned int free) { - return rcar_du_read(rgrp->dev, - rgrp->mmio_offset + index * PLANE_OFF + reg); + unsigned int num_planes = state->format->planes; + int fixed = -1; + int i; + + if (state->source == RCAR_DU_PLANE_VSPD0) { + /* VSPD0 feeds plane 0 on DU0/1. */ + if (plane->group->index != 0) + return -EINVAL; + + fixed = 0; + } else if (state->source == RCAR_DU_PLANE_VSPD1) { + /* VSPD1 feeds plane 1 on DU0/1 or plane 0 on DU2. */ + fixed = plane->group->index == 0 ? 1 : 0; + } + + if (fixed >= 0) + return free & (1 << fixed) ? fixed : -EBUSY; + + for (i = RCAR_DU_NUM_HW_PLANES - 1; i >= 0; --i) { + if (!(free & (1 << i))) + continue; + + if (num_planes == 1 || free & (1 << ((i + 1) % 8))) + break; + } + + return i < 0 ? -EBUSY : i; } +int rcar_du_atomic_check_planes(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct rcar_du_device *rcdu = dev->dev_private; + unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, }; + unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, }; + bool needs_realloc = false; + unsigned int groups = 0; + unsigned int i; + + /* Check if hardware planes need to be reallocated. */ + for (i = 0; i < dev->mode_config.num_total_plane; ++i) { + struct rcar_du_plane_state *plane_state; + struct rcar_du_plane *plane; + unsigned int index; + + if (!state->planes[i]) + continue; + + plane = to_rcar_plane(state->planes[i]); + plane_state = to_rcar_plane_state(state->plane_states[i]); + + dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__, + plane->group->index, plane - plane->group->planes); + + /* If the plane is being disabled we don't need to go through + * the full reallocation procedure. Just mark the hardware + * plane(s) as freed. 
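rcar_du_plane_hwalloc() now distinguishes two cases: VSPD0/VSPD1 live sources get a fixed hardware plane (VSPD0 feeds plane 0 of the first group, VSPD1 plane 1 of the first group or plane 0 of the second), while memory-backed planes are allocated in reverse index order to keep planes 0 and 1 available. The sketch below reimplements that selection over a free mask; the enum and the -1 error value are simplified stand-ins.

    #include <stdio.h>

    #define NUM_HW_PLANES    8

    enum source { SRC_MEMORY, SRC_VSPD0, SRC_VSPD1 };

    /* Pick a hardware plane index from "free" for a plane in "group"
     * needing "num_planes" (1 or 2) planes, following the
     * rcar_du_plane_hwalloc() rules.  Returns -1 when nothing fits. */
    static int hwalloc(enum source src, unsigned int group,
                       unsigned int num_planes, unsigned int free)
    {
        int fixed = -1;
        int i;

        if (src == SRC_VSPD0) {
            if (group != 0)
                return -1;    /* VSPD0 only feeds the first group */
            fixed = 0;
        } else if (src == SRC_VSPD1) {
            fixed = group == 0 ? 1 : 0;
        }

        if (fixed >= 0)
            return (free & (1u << fixed)) ? fixed : -1;

        /* Memory sources: scan downwards to keep planes 0/1 free, and
         * require the wrapped neighbour for two-plane formats. */
        for (i = NUM_HW_PLANES - 1; i >= 0; --i) {
            if (!(free & (1u << i)))
                continue;
            if (num_planes == 1 ||
                (free & (1u << ((i + 1) % NUM_HW_PLANES))))
                return i;
        }
        return -1;
    }

    int main(void)
    {
        printf("VSPD1 on group 1 -> plane %d\n",
               hwalloc(SRC_VSPD1, 1, 1, 0xff));
        printf("two-plane memory format -> plane %d\n",
               hwalloc(SRC_MEMORY, 0, 2, 0x3f));
        return 0;
    }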
+ */ + if (!plane_state->format) { + dev_dbg(rcdu->dev, "%s: plane is being disabled\n", + __func__); + index = plane - plane->group->planes; + group_freed_planes[plane->group->index] |= 1 << index; + plane_state->hwindex = -1; + continue; + } + + /* If the plane needs to be reallocated mark it as such, and + * mark the hardware plane(s) as free. + */ + if (rcar_du_plane_needs_realloc(plane, plane_state)) { + dev_dbg(rcdu->dev, "%s: plane needs reallocation\n", + __func__); + groups |= 1 << plane->group->index; + needs_realloc = true; + + index = plane - plane->group->planes; + group_freed_planes[plane->group->index] |= 1 << index; + plane_state->hwindex = -1; + } + } + + if (!needs_realloc) + return 0; + + /* Grab all plane states for the groups that need reallocation to ensure + * locking and avoid racy updates. This serializes the update operation, + * but there's not much we can do about it as that's the hardware + * design. + * + * Compute the used planes mask for each group at the same time to avoid + * looping over the planes separately later. + */ + while (groups) { + unsigned int index = ffs(groups) - 1; + struct rcar_du_group *group = &rcdu->groups[index]; + unsigned int used_planes = 0; + + dev_dbg(rcdu->dev, "%s: finding free planes for group %u\n", + __func__, index); + + for (i = 0; i < group->num_planes; ++i) { + struct rcar_du_plane *plane = &group->planes[i]; + struct rcar_du_plane_state *plane_state; + struct drm_plane_state *s; + + s = drm_atomic_get_plane_state(state, &plane->plane); + if (IS_ERR(s)) + return PTR_ERR(s); + + /* If the plane has been freed in the above loop its + * hardware planes must not be added to the used planes + * bitmask. However, the current state doesn't reflect + * the free state yet, as we've modified the new state + * above. Use the local freed planes list to check for + * that condition instead. + */ + if (group_freed_planes[index] & (1 << i)) { + dev_dbg(rcdu->dev, + "%s: plane (%u,%tu) has been freed, skipping\n", + __func__, plane->group->index, + plane - plane->group->planes); + continue; + } + + plane_state = to_rcar_plane_state(plane->plane.state); + used_planes |= rcar_du_plane_hwmask(plane_state); + + dev_dbg(rcdu->dev, + "%s: plane (%u,%tu) uses %u hwplanes (index %d)\n", + __func__, plane->group->index, + plane - plane->group->planes, + plane_state->format ? + plane_state->format->planes : 0, + plane_state->hwindex); + } + + group_free_planes[index] = 0xff & ~used_planes; + groups &= ~(1 << index); + + dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n", + __func__, index, group_free_planes[index]); + } + + /* Reallocate hardware planes for each plane that needs it. */ + for (i = 0; i < dev->mode_config.num_total_plane; ++i) { + struct rcar_du_plane_state *plane_state; + struct rcar_du_plane *plane; + unsigned int crtc_planes; + unsigned int free; + int idx; + + if (!state->planes[i]) + continue; + + plane = to_rcar_plane(state->planes[i]); + plane_state = to_rcar_plane_state(state->plane_states[i]); + + dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__, + plane->group->index, plane - plane->group->planes); + + /* Skip planes that are being disabled or don't need to be + * reallocated. + */ + if (!plane_state->format || + !rcar_du_plane_needs_realloc(plane, plane_state)) + continue; + + /* Try to allocate the plane from the free planes currently + * associated with the target CRTC to avoid restarting the CRTC + * group and thus minimize flicker. If it fails fall back to + * allocating from all free planes. 
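The reallocation loop first tries the hardware planes already associated with the target CRTC, selected through the dptsr_planes mask, and only then falls back to any free plane, to avoid restarting the group. A minimal sketch of that two-pass attempt follows; the pick_first() helper is a trivial placeholder for the real allocator.

    #include <stdio.h>

    /* Placeholder allocator: lowest set bit in "free", or -1 if none. */
    static int pick_first(unsigned int free)
    {
        int i;

        for (i = 0; i < 8; i++)
            if (free & (1u << i))
                return i;
        return -1;
    }

    /* Two-pass allocation as in rcar_du_atomic_check_planes(): prefer the
     * hardware planes tied to this CRTC's timing generator (dptsr_planes
     * selects CRTC 1 planes, its complement CRTC 0 planes), then fall
     * back to any free plane. */
    static int alloc_for_crtc(unsigned int crtc_index, unsigned int dptsr_planes,
                              unsigned int free)
    {
        unsigned int crtc_planes = (crtc_index % 2) ? dptsr_planes
                                                    : 0xffu & ~dptsr_planes;
        int idx = pick_first(free & crtc_planes);

        return idx >= 0 ? idx : pick_first(free);
    }

    int main(void)
    {
        /* Gen3 default split from this patch: plane 2 follows CRTC 1. */
        unsigned int dptsr = 0x04;

        printf("CRTC 1, free 0x0c -> plane %d\n", alloc_for_crtc(1, dptsr, 0x0c));
        printf("CRTC 1, free 0x03 -> plane %d\n", alloc_for_crtc(1, dptsr, 0x03));
        return 0;
    }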
+ */ + crtc_planes = to_rcar_crtc(plane_state->state.crtc)->index % 2 + ? plane->group->dptsr_planes + : ~plane->group->dptsr_planes; + free = group_free_planes[plane->group->index]; + + idx = rcar_du_plane_hwalloc(plane, plane_state, + free & crtc_planes); + if (idx < 0) + idx = rcar_du_plane_hwalloc(plane, plane_state, + free); + if (idx < 0) { + dev_dbg(rcdu->dev, "%s: no available hardware plane\n", + __func__); + return idx; + } + + dev_dbg(rcdu->dev, "%s: allocated %u hwplanes (index %u)\n", + __func__, plane_state->format->planes, idx); + + plane_state->hwindex = idx; + + group_free_planes[plane->group->index] &= + ~rcar_du_plane_hwmask(plane_state); + + dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n", + __func__, plane->group->index, + group_free_planes[plane->group->index]); + } + + return 0; +} + +/* ----------------------------------------------------------------------------- + * Plane Setup + */ + +#define RCAR_DU_COLORKEY_NONE (0 << 24) +#define RCAR_DU_COLORKEY_SOURCE (1 << 24) +#define RCAR_DU_COLORKEY_MASK (1 << 24) + static void rcar_du_plane_write(struct rcar_du_group *rgrp, unsigned int index, u32 reg, u32 data) { @@ -42,34 +322,45 @@ static void rcar_du_plane_write(struct rcar_du_group *rgrp, data); } -static void rcar_du_plane_setup_fb(struct rcar_du_plane *plane) +static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp, + const struct rcar_du_plane_state *state) { - struct rcar_du_plane_state *state = - to_rcar_plane_state(plane->plane.state); - struct drm_framebuffer *fb = plane->plane.state->fb; - struct rcar_du_group *rgrp = plane->group; unsigned int src_x = state->state.src_x >> 16; unsigned int src_y = state->state.src_y >> 16; unsigned int index = state->hwindex; - struct drm_gem_cma_object *gem; + unsigned int pitch; bool interlaced; - u32 mwr; + u32 dma[2]; interlaced = state->state.crtc->state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE; + if (state->source == RCAR_DU_PLANE_MEMORY) { + struct drm_framebuffer *fb = state->state.fb; + struct drm_gem_cma_object *gem; + unsigned int i; + + if (state->format->planes == 2) + pitch = fb->pitches[0]; + else + pitch = fb->pitches[0] * 8 / state->format->bpp; + + for (i = 0; i < state->format->planes; ++i) { + gem = drm_fb_cma_get_gem_obj(fb, i); + dma[i] = gem->paddr + fb->offsets[i]; + } + } else { + pitch = state->state.src_w >> 16; + dma[0] = 0; + dma[1] = 0; + } + /* Memory pitch (expressed in pixels). Must be doubled for interlaced * operation with 32bpp formats. */ - if (state->format->planes == 2) - mwr = fb->pitches[0]; - else - mwr = fb->pitches[0] * 8 / state->format->bpp; - - if (interlaced && state->format->bpp == 32) - mwr *= 2; - - rcar_du_plane_write(rgrp, index, PnMWR, mwr); + rcar_du_plane_write(rgrp, index, PnMWR, + (interlaced && state->format->bpp == 32) ? + pitch * 2 : pitch); /* The Y position is expressed in raster line units and must be doubled * for 32bpp formats, according to the R8A7790 datasheet. No mention of @@ -87,30 +378,25 @@ static void rcar_du_plane_setup_fb(struct rcar_du_plane *plane) rcar_du_plane_write(rgrp, index, PnSPYR, src_y * (!interlaced && state->format->bpp == 32 ? 
2 : 1)); - gem = drm_fb_cma_get_gem_obj(fb, 0); - rcar_du_plane_write(rgrp, index, PnDSA0R, gem->paddr + fb->offsets[0]); + rcar_du_plane_write(rgrp, index, PnDSA0R, dma[0]); if (state->format->planes == 2) { index = (index + 1) % 8; - rcar_du_plane_write(rgrp, index, PnMWR, fb->pitches[0]); + rcar_du_plane_write(rgrp, index, PnMWR, pitch); rcar_du_plane_write(rgrp, index, PnSPXR, src_x); rcar_du_plane_write(rgrp, index, PnSPYR, src_y * (state->format->bpp == 16 ? 2 : 1) / 2); - gem = drm_fb_cma_get_gem_obj(fb, 1); - rcar_du_plane_write(rgrp, index, PnDSA0R, - gem->paddr + fb->offsets[1]); + rcar_du_plane_write(rgrp, index, PnDSA0R, dma[1]); } } -static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, - unsigned int index) +static void rcar_du_plane_setup_mode(struct rcar_du_group *rgrp, + unsigned int index, + const struct rcar_du_plane_state *state) { - struct rcar_du_plane_state *state = - to_rcar_plane_state(plane->plane.state); - struct rcar_du_group *rgrp = plane->group; u32 colorkey; u32 pnmr; @@ -168,12 +454,10 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, } } -static void __rcar_du_plane_setup(struct rcar_du_plane *plane, - unsigned int index) +static void rcar_du_plane_setup_format_gen2(struct rcar_du_group *rgrp, + unsigned int index, + const struct rcar_du_plane_state *state) { - struct rcar_du_plane_state *state = - to_rcar_plane_state(plane->plane.state); - struct rcar_du_group *rgrp = plane->group; u32 ddcr2 = PnDDCR2_CODE; u32 ddcr4; @@ -182,11 +466,8 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane, * The data format is selected by the DDDF field in PnMR and the EDF * field in DDCR4. */ - ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4); - ddcr4 &= ~PnDDCR4_EDF_MASK; - ddcr4 |= state->format->edf | PnDDCR4_CODE; - rcar_du_plane_setup_mode(plane, index); + rcar_du_plane_setup_mode(rgrp, index, state); if (state->format->planes == 2) { if (state->hwindex != index) { @@ -204,31 +485,72 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane, } rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2); + + ddcr4 = state->format->edf | PnDDCR4_CODE; + if (state->source != RCAR_DU_PLANE_MEMORY) + ddcr4 |= PnDDCR4_VSPS; + rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4); +} + +static void rcar_du_plane_setup_format_gen3(struct rcar_du_group *rgrp, + unsigned int index, + const struct rcar_du_plane_state *state) +{ + rcar_du_plane_write(rgrp, index, PnMR, + PnMR_SPIM_TP_OFF | state->format->pnmr); + + rcar_du_plane_write(rgrp, index, PnDDCR4, + state->format->edf | PnDDCR4_CODE); +} + +static void rcar_du_plane_setup_format(struct rcar_du_group *rgrp, + unsigned int index, + const struct rcar_du_plane_state *state) +{ + struct rcar_du_device *rcdu = rgrp->dev; + + if (rcdu->info->gen < 3) + rcar_du_plane_setup_format_gen2(rgrp, index, state); + else + rcar_du_plane_setup_format_gen3(rgrp, index, state); /* Destination position and size */ - rcar_du_plane_write(rgrp, index, PnDSXR, plane->plane.state->crtc_w); - rcar_du_plane_write(rgrp, index, PnDSYR, plane->plane.state->crtc_h); - rcar_du_plane_write(rgrp, index, PnDPXR, plane->plane.state->crtc_x); - rcar_du_plane_write(rgrp, index, PnDPYR, plane->plane.state->crtc_y); - - /* Wrap-around and blinking, disabled */ - rcar_du_plane_write(rgrp, index, PnWASPR, 0); - rcar_du_plane_write(rgrp, index, PnWAMWR, 4095); - rcar_du_plane_write(rgrp, index, PnBTR, 0); - rcar_du_plane_write(rgrp, index, PnMLR, 0); + rcar_du_plane_write(rgrp, index, PnDSXR, state->state.crtc_w); + 
rcar_du_plane_write(rgrp, index, PnDSYR, state->state.crtc_h); + rcar_du_plane_write(rgrp, index, PnDPXR, state->state.crtc_x); + rcar_du_plane_write(rgrp, index, PnDPYR, state->state.crtc_y); + + if (rcdu->info->gen < 3) { + /* Wrap-around and blinking, disabled */ + rcar_du_plane_write(rgrp, index, PnWASPR, 0); + rcar_du_plane_write(rgrp, index, PnWAMWR, 4095); + rcar_du_plane_write(rgrp, index, PnBTR, 0); + rcar_du_plane_write(rgrp, index, PnMLR, 0); + } } -void rcar_du_plane_setup(struct rcar_du_plane *plane) +void __rcar_du_plane_setup(struct rcar_du_group *rgrp, + const struct rcar_du_plane_state *state) { - struct rcar_du_plane_state *state = - to_rcar_plane_state(plane->plane.state); + struct rcar_du_device *rcdu = rgrp->dev; - __rcar_du_plane_setup(plane, state->hwindex); + rcar_du_plane_setup_format(rgrp, state->hwindex, state); if (state->format->planes == 2) - __rcar_du_plane_setup(plane, (state->hwindex + 1) % 8); + rcar_du_plane_setup_format(rgrp, (state->hwindex + 1) % 8, + state); - rcar_du_plane_setup_fb(plane); + if (rcdu->info->gen < 3) + rcar_du_plane_setup_scanout(rgrp, state); + + if (state->source == RCAR_DU_PLANE_VSPD1) { + unsigned int vspd1_sink = rgrp->index ? 2 : 0; + + if (rcdu->vspd1_sink != vspd1_sink) { + rcdu->vspd1_sink = vspd1_sink; + rcar_du_set_dpad0_vsp1_routing(rcdu); + } + } } static int rcar_du_plane_atomic_check(struct drm_plane *plane, @@ -263,9 +585,27 @@ static void rcar_du_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { struct rcar_du_plane *rplane = to_rcar_plane(plane); + struct rcar_du_plane_state *old_rstate; + struct rcar_du_plane_state *new_rstate; + + if (!plane->state->crtc) + return; + + rcar_du_plane_setup(rplane); + + /* Check whether the source has changed from memory to live source or + * from live source to memory. The source has been configured by the + * VSPS bit in the PnDDCR4 register. Although the datasheet states that + * the bit is updated during vertical blanking, it seems that updates + * only occur when the DU group is held in reset through the DSYSR.DRES + * bit. We thus need to restart the group if the source changes. + */ + old_rstate = to_rcar_plane_state(old_state); + new_rstate = to_rcar_plane_state(plane->state); - if (plane->state->crtc) - rcar_du_plane_setup(rplane); + if ((old_rstate->source == RCAR_DU_PLANE_MEMORY) != + (new_rstate->source == RCAR_DU_PLANE_MEMORY)) + rplane->group->need_restart = true; } static const struct drm_plane_helper_funcs rcar_du_plane_helper_funcs = { @@ -313,6 +653,7 @@ static void rcar_du_plane_reset(struct drm_plane *plane) return; state->hwindex = -1; + state->source = RCAR_DU_PLANE_MEMORY; state->alpha = 255; state->colorkey = RCAR_DU_COLORKEY_NONE; state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 
0 : 1; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h index 9732bff1911b..b18b7b25dbfa 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h @@ -28,6 +28,12 @@ struct rcar_du_group; #define RCAR_DU_NUM_KMS_PLANES 9 #define RCAR_DU_NUM_HW_PLANES 8 +enum rcar_du_plane_source { + RCAR_DU_PLANE_MEMORY, + RCAR_DU_PLANE_VSPD0, + RCAR_DU_PLANE_VSPD1, +}; + struct rcar_du_plane { struct drm_plane plane; struct rcar_du_group *group; @@ -52,6 +58,7 @@ struct rcar_du_plane_state { const struct rcar_du_format_info *format; int hwindex; + enum rcar_du_plane_source source; unsigned int alpha; unsigned int colorkey; @@ -64,8 +71,20 @@ to_rcar_plane_state(struct drm_plane_state *state) return container_of(state, struct rcar_du_plane_state, state); } +int rcar_du_atomic_check_planes(struct drm_device *dev, + struct drm_atomic_state *state); + int rcar_du_planes_init(struct rcar_du_group *rgrp); -void rcar_du_plane_setup(struct rcar_du_plane *plane); +void __rcar_du_plane_setup(struct rcar_du_group *rgrp, + const struct rcar_du_plane_state *state); + +static inline void rcar_du_plane_setup(struct rcar_du_plane *plane) +{ + struct rcar_du_plane_state *state = + to_rcar_plane_state(plane->plane.state); + + return __rcar_du_plane_setup(plane->group, state); +} #endif /* __RCAR_DU_PLANE_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index 70fcbc471ebd..d2f66068e52c 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -1,7 +1,7 @@ /* * rcar_du_regs.h -- R-Car Display Unit Registers Definitions * - * Copyright (C) 2013 Renesas Electronics Corporation + * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) * @@ -16,6 +16,7 @@ #define DU0_REG_OFFSET 0x00000 #define DU1_REG_OFFSET 0x30000 #define DU2_REG_OFFSET 0x40000 +#define DU3_REG_OFFSET 0x70000 /* ----------------------------------------------------------------------------- * Display Control Registers @@ -186,7 +187,7 @@ #define DEFR6 0x000e8 #define DEFR6_CODE (0x7778 << 16) -#define DEFR6_ODPM22_D2SMR (0 << 10) +#define DEFR6_ODPM22_DSMR (0 << 10) #define DEFR6_ODPM22_DISP (2 << 10) #define DEFR6_ODPM22_CDE (3 << 10) #define DEFR6_ODPM22_MASK (3 << 10) @@ -260,6 +261,21 @@ #define DIDSR_PDCS_CLK(n, clk) (clk << ((n) * 2)) #define DIDSR_PDCS_MASK(n) (3 << ((n) * 2)) +#define DEFR10 0x20038 +#define DEFR10_CODE (0x7795 << 16) +#define DEFR10_VSPF1_RGB (0 << 14) +#define DEFR10_VSPF1_YC (1 << 14) +#define DEFR10_DOCF1_RGB (0 << 12) +#define DEFR10_DOCF1_YC (1 << 12) +#define DEFR10_YCDF0_YCBCR444 (0 << 11) +#define DEFR10_YCDF0_YCBCR422 (1 << 11) +#define DEFR10_VSPF0_RGB (0 << 10) +#define DEFR10_VSPF0_YC (1 << 10) +#define DEFR10_DOCF0_RGB (0 << 8) +#define DEFR10_DOCF0_YC (1 << 8) +#define DEFR10_TSEL_H3_TCON1 (0 << 1) /* DEFR102 register only (DU2/DU3) */ +#define DEFR10_DEFE10 (1 << 0) + /* ----------------------------------------------------------------------------- * Display Timing Generation Registers */ @@ -389,6 +405,7 @@ #define PnDDCR4 0x00190 #define PnDDCR4_CODE (0x7766 << 16) +#define PnDDCR4_VSPS (1 << 13) #define PnDDCR4_SDFS_RGB (0 << 4) #define PnDDCR4_SDFS_YC (5 << 4) #define PnDDCR4_SDFS_MASK (7 << 4) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c index e0a5d8f93963..9d7e5c99caf6 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c 
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c @@ -31,12 +31,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = { .best_encoder = rcar_du_connector_best_encoder, }; -static void rcar_du_vga_connector_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} - static enum drm_connector_status rcar_du_vga_connector_detect(struct drm_connector *connector, bool force) { @@ -48,7 +42,7 @@ static const struct drm_connector_funcs connector_funcs = { .reset = drm_atomic_helper_connector_reset, .detect = rcar_du_vga_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = rcar_du_vga_connector_destroy, + .destroy = drm_connector_cleanup, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; @@ -76,9 +70,6 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, return ret; drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_connector_register(connector); - if (ret < 0) - return ret; connector->dpms = DRM_MODE_DPMS_OFF; drm_object_property_set_value(&connector->base, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c new file mode 100644 index 000000000000..de7ef041182b --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -0,0 +1,384 @@ +/* + * rcar_du_vsp.h -- R-Car Display Unit VSP-Based Compositor + * + * Copyright (C) 2015 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_plane_helper.h> + +#include <linux/of_platform.h> +#include <linux/videodev2.h> + +#include <media/vsp1.h> + +#include "rcar_du_drv.h" +#include "rcar_du_kms.h" +#include "rcar_du_vsp.h" + +void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) +{ + const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode; + struct rcar_du_device *rcdu = crtc->group->dev; + struct rcar_du_plane_state state = { + .state = { + .crtc = &crtc->crtc, + .crtc_x = 0, + .crtc_y = 0, + .crtc_w = mode->hdisplay, + .crtc_h = mode->vdisplay, + .src_x = 0, + .src_y = 0, + .src_w = mode->hdisplay << 16, + .src_h = mode->vdisplay << 16, + }, + .format = rcar_du_format_info(DRM_FORMAT_ARGB8888), + .source = RCAR_DU_PLANE_VSPD1, + .alpha = 255, + .colorkey = 0, + .zpos = 0, + }; + + if (rcdu->info->gen >= 3) + state.hwindex = (crtc->index % 2) ? 2 : 0; + else + state.hwindex = crtc->index % 2; + + __rcar_du_plane_setup(crtc->group, &state); + + /* Ensure that the plane source configuration takes effect by requesting + * a restart of the group. See rcar_du_plane_atomic_update() for a more + * detailed explanation. + * + * TODO: Check whether this is still needed on Gen3. 
+ */ + crtc->group->need_restart = true; + + vsp1_du_setup_lif(crtc->vsp->vsp, mode->hdisplay, mode->vdisplay); +} + +void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) +{ + vsp1_du_setup_lif(crtc->vsp->vsp, 0, 0); +} + +void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) +{ + vsp1_du_atomic_begin(crtc->vsp->vsp); +} + +void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) +{ + vsp1_du_atomic_flush(crtc->vsp->vsp); +} + +/* Keep the two tables in sync. */ +static const u32 formats_kms[] = { + DRM_FORMAT_RGB332, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, + DRM_FORMAT_NV16, + DRM_FORMAT_NV61, + DRM_FORMAT_YUV420, + DRM_FORMAT_YVU420, + DRM_FORMAT_YUV422, + DRM_FORMAT_YVU422, + DRM_FORMAT_YUV444, + DRM_FORMAT_YVU444, +}; + +static const u32 formats_v4l2[] = { + V4L2_PIX_FMT_RGB332, + V4L2_PIX_FMT_ARGB444, + V4L2_PIX_FMT_XRGB444, + V4L2_PIX_FMT_ARGB555, + V4L2_PIX_FMT_XRGB555, + V4L2_PIX_FMT_RGB565, + V4L2_PIX_FMT_RGB24, + V4L2_PIX_FMT_BGR24, + V4L2_PIX_FMT_ARGB32, + V4L2_PIX_FMT_XRGB32, + V4L2_PIX_FMT_ABGR32, + V4L2_PIX_FMT_XBGR32, + V4L2_PIX_FMT_UYVY, + V4L2_PIX_FMT_VYUY, + V4L2_PIX_FMT_YUYV, + V4L2_PIX_FMT_YVYU, + V4L2_PIX_FMT_NV12M, + V4L2_PIX_FMT_NV21M, + V4L2_PIX_FMT_NV16M, + V4L2_PIX_FMT_NV61M, + V4L2_PIX_FMT_YUV420M, + V4L2_PIX_FMT_YVU420M, + V4L2_PIX_FMT_YUV422M, + V4L2_PIX_FMT_YVU422M, + V4L2_PIX_FMT_YUV444M, + V4L2_PIX_FMT_YVU444M, +}; + +static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane) +{ + struct rcar_du_vsp_plane_state *state = + to_rcar_vsp_plane_state(plane->plane.state); + struct drm_framebuffer *fb = plane->plane.state->fb; + struct v4l2_rect src; + struct v4l2_rect dst; + dma_addr_t paddr[2] = { 0, }; + u32 pixelformat = 0; + unsigned int i; + + src.left = state->state.src_x >> 16; + src.top = state->state.src_y >> 16; + src.width = state->state.src_w >> 16; + src.height = state->state.src_h >> 16; + + dst.left = state->state.crtc_x; + dst.top = state->state.crtc_y; + dst.width = state->state.crtc_w; + dst.height = state->state.crtc_h; + + for (i = 0; i < state->format->planes; ++i) { + struct drm_gem_cma_object *gem; + + gem = drm_fb_cma_get_gem_obj(fb, i); + paddr[i] = gem->paddr + fb->offsets[i]; + } + + for (i = 0; i < ARRAY_SIZE(formats_kms); ++i) { + if (formats_kms[i] == state->format->fourcc) { + pixelformat = formats_v4l2[i]; + break; + } + } + + WARN_ON(!pixelformat); + + vsp1_du_atomic_update(plane->vsp->vsp, plane->index, pixelformat, + fb->pitches[0], paddr, &src, &dst); +} + +static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state); + struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane); + struct rcar_du_device *rcdu = rplane->vsp->dev; + + if (!state->fb || !state->crtc) { + rstate->format = NULL; + return 0; + } + + if (state->src_w >> 16 != state->crtc_w || + state->src_h >> 16 != state->crtc_h) { + dev_dbg(rcdu->dev, "%s: scaling not supported\n", __func__); + return -EINVAL; + } + + rstate->format = rcar_du_format_info(state->fb->pixel_format); + if (rstate->format == NULL) { + dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__, + state->fb->pixel_format); + return -EINVAL; + } + + 
return 0; +} + +static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane); + + if (plane->state->crtc) + rcar_du_vsp_plane_setup(rplane); + else + vsp1_du_atomic_update(rplane->vsp->vsp, rplane->index, 0, 0, 0, + NULL, NULL); +} + +static const struct drm_plane_helper_funcs rcar_du_vsp_plane_helper_funcs = { + .atomic_check = rcar_du_vsp_plane_atomic_check, + .atomic_update = rcar_du_vsp_plane_atomic_update, +}; + +static struct drm_plane_state * +rcar_du_vsp_plane_atomic_duplicate_state(struct drm_plane *plane) +{ + struct rcar_du_vsp_plane_state *state; + struct rcar_du_vsp_plane_state *copy; + + if (WARN_ON(!plane->state)) + return NULL; + + state = to_rcar_vsp_plane_state(plane->state); + copy = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (copy == NULL) + return NULL; + + __drm_atomic_helper_plane_duplicate_state(plane, ©->state); + + return ©->state; +} + +static void rcar_du_vsp_plane_atomic_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + __drm_atomic_helper_plane_destroy_state(plane, state); + kfree(to_rcar_vsp_plane_state(state)); +} + +static void rcar_du_vsp_plane_reset(struct drm_plane *plane) +{ + struct rcar_du_vsp_plane_state *state; + + if (plane->state) { + rcar_du_vsp_plane_atomic_destroy_state(plane, plane->state); + plane->state = NULL; + } + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state == NULL) + return; + + state->alpha = 255; + + plane->state = &state->state; + plane->state->plane = plane; +} + +static int rcar_du_vsp_plane_atomic_set_property(struct drm_plane *plane, + struct drm_plane_state *state, struct drm_property *property, + uint64_t val) +{ + struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state); + struct rcar_du_device *rcdu = to_rcar_vsp_plane(plane)->vsp->dev; + + if (property == rcdu->props.alpha) + rstate->alpha = val; + else + return -EINVAL; + + return 0; +} + +static int rcar_du_vsp_plane_atomic_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, struct drm_property *property, + uint64_t *val) +{ + const struct rcar_du_vsp_plane_state *rstate = + container_of(state, const struct rcar_du_vsp_plane_state, state); + struct rcar_du_device *rcdu = to_rcar_vsp_plane(plane)->vsp->dev; + + if (property == rcdu->props.alpha) + *val = rstate->alpha; + else + return -EINVAL; + + return 0; +} + +static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .reset = rcar_du_vsp_plane_reset, + .set_property = drm_atomic_helper_plane_set_property, + .destroy = drm_plane_cleanup, + .atomic_duplicate_state = rcar_du_vsp_plane_atomic_duplicate_state, + .atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state, + .atomic_set_property = rcar_du_vsp_plane_atomic_set_property, + .atomic_get_property = rcar_du_vsp_plane_atomic_get_property, +}; + +int rcar_du_vsp_init(struct rcar_du_vsp *vsp) +{ + struct rcar_du_device *rcdu = vsp->dev; + struct platform_device *pdev; + struct device_node *np; + unsigned int i; + int ret; + + /* Find the VSP device and initialize it. 
*/ + np = of_parse_phandle(rcdu->dev->of_node, "vsps", vsp->index); + if (!np) { + dev_err(rcdu->dev, "vsps node not found\n"); + return -ENXIO; + } + + pdev = of_find_device_by_node(np); + of_node_put(np); + if (!pdev) + return -ENXIO; + + vsp->vsp = &pdev->dev; + + ret = vsp1_du_init(vsp->vsp); + if (ret < 0) + return ret; + + /* The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to + * 4 RPFs. + */ + vsp->num_planes = rcdu->info->gen >= 3 ? 5 : 4; + + vsp->planes = devm_kcalloc(rcdu->dev, vsp->num_planes, + sizeof(*vsp->planes), GFP_KERNEL); + if (!vsp->planes) + return -ENOMEM; + + for (i = 0; i < vsp->num_planes; ++i) { + enum drm_plane_type type = i ? DRM_PLANE_TYPE_OVERLAY + : DRM_PLANE_TYPE_PRIMARY; + struct rcar_du_vsp_plane *plane = &vsp->planes[i]; + + plane->vsp = vsp; + plane->index = i; + + ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, + 1 << vsp->index, + &rcar_du_vsp_plane_funcs, + formats_kms, + ARRAY_SIZE(formats_kms), type, + NULL); + if (ret < 0) + return ret; + + drm_plane_helper_add(&plane->plane, + &rcar_du_vsp_plane_helper_funcs); + + if (type == DRM_PLANE_TYPE_PRIMARY) + continue; + + drm_object_attach_property(&plane->plane.base, + rcdu->props.alpha, 255); + } + + return 0; +} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h new file mode 100644 index 000000000000..df3bf3805c69 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h @@ -0,0 +1,76 @@ +/* + * rcar_du_vsp.h -- R-Car Display Unit VSP-Based Compositor + * + * Copyright (C) 2015 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __RCAR_DU_VSP_H__ +#define __RCAR_DU_VSP_H__ + +#include <drm/drmP.h> +#include <drm/drm_crtc.h> + +struct rcar_du_format_info; +struct rcar_du_vsp; + +struct rcar_du_vsp_plane { + struct drm_plane plane; + struct rcar_du_vsp *vsp; + unsigned int index; +}; + +struct rcar_du_vsp { + unsigned int index; + struct device *vsp; + struct rcar_du_device *dev; + struct rcar_du_vsp_plane *planes; + unsigned int num_planes; +}; + +static inline struct rcar_du_vsp_plane *to_rcar_vsp_plane(struct drm_plane *p) +{ + return container_of(p, struct rcar_du_vsp_plane, plane); +} + +/** + * struct rcar_du_vsp_plane_state - Driver-specific plane state + * @state: base DRM plane state + * @format: information about the pixel format used by the plane + * @alpha: value of the plane alpha property + */ +struct rcar_du_vsp_plane_state { + struct drm_plane_state state; + + const struct rcar_du_format_info *format; + + unsigned int alpha; +}; + +static inline struct rcar_du_vsp_plane_state * +to_rcar_vsp_plane_state(struct drm_plane_state *state) +{ + return container_of(state, struct rcar_du_vsp_plane_state, state); +} + +#ifdef CONFIG_DRM_RCAR_VSP +int rcar_du_vsp_init(struct rcar_du_vsp *vsp); +void rcar_du_vsp_enable(struct rcar_du_crtc *crtc); +void rcar_du_vsp_disable(struct rcar_du_crtc *crtc); +void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc); +void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc); +#else +static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp) { return 0; }; +static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { }; +static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { }; +static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { }; +static inline void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) { }; +#endif + +#endif /* __RCAR_DU_VSP_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h index 77cf9289ab65..d7d294ba2dbe 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h @@ -1,7 +1,7 @@ /* * rcar_lvds_regs.h -- R-Car LVDS Interface Registers Definitions * - * Copyright (C) 2013 Renesas Electronics Corporation + * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) * @@ -15,28 +15,38 @@ #define LVDCR0 0x0000 #define LVDCR0_DUSEL (1 << 15) -#define LVDCR0_DMD (1 << 12) +#define LVDCR0_DMD (1 << 12) /* Gen2 only */ #define LVDCR0_LVMD_MASK (0xf << 8) #define LVDCR0_LVMD_SHIFT 8 -#define LVDCR0_PLLEN (1 << 4) -#define LVDCR0_BEN (1 << 2) -#define LVDCR0_LVEN (1 << 1) +#define LVDCR0_PLLON (1 << 4) +#define LVDCR0_PWD (1 << 2) /* Gen3 only */ +#define LVDCR0_BEN (1 << 2) /* Gen2 only */ +#define LVDCR0_LVEN (1 << 1) /* Gen2 only */ #define LVDCR0_LVRES (1 << 0) #define LVDCR1 0x0004 -#define LVDCR1_CKSEL (1 << 15) -#define LVDCR1_CHSTBY(n) (3 << (2 + (n) * 2)) -#define LVDCR1_CLKSTBY (3 << 0) +#define LVDCR1_CKSEL (1 << 15) /* Gen2 only */ +#define LVDCR1_CHSTBY_GEN2(n) (3 << (2 + (n) * 2)) /* Gen2 only */ +#define LVDCR1_CHSTBY_GEN3(n) (1 << (2 + (n) * 2)) /* Gen3 only */ +#define LVDCR1_CLKSTBY_GEN2 (3 << 0) /* Gen2 only */ +#define LVDCR1_CLKSTBY_GEN3 (1 << 0) /* Gen3 only */ #define LVDPLLCR 0x0008 #define LVDPLLCR_CEEN (1 << 14) #define LVDPLLCR_FBEN (1 << 13) #define LVDPLLCR_COSEL (1 << 12) +/* Gen2 */ #define LVDPLLCR_PLLDLYCNT_150M (0x1bf << 0) #define LVDPLLCR_PLLDLYCNT_121M (0x22c << 0) #define LVDPLLCR_PLLDLYCNT_60M (0x77b << 0) #define 
LVDPLLCR_PLLDLYCNT_38M (0x69a << 0) #define LVDPLLCR_PLLDLYCNT_MASK (0x7ff << 0) +/* Gen3 */ +#define LVDPLLCR_PLLDIVCNT_42M (0x014cb << 0) +#define LVDPLLCR_PLLDIVCNT_85M (0x00a45 << 0) +#define LVDPLLCR_PLLDIVCNT_128M (0x006c3 << 0) +#define LVDPLLCR_PLLDIVCNT_148M (0x046c1 << 0) +#define LVDPLLCR_PLLDIVCNT_MASK (0x7ffff << 0) #define LVDCTRCR 0x000c #define LVDCTRCR_CTR3SEL_ZERO (0 << 12) diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 85739859dffc..76b3362c5e59 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -35,3 +35,11 @@ config ROCKCHIP_DW_MIPI_DSI for the Synopsys DesignWare HDMI driver. If you want to enable MIPI DSI on RK3288 based SoC, you should selet this option. + +config ROCKCHIP_INNO_HDMI + tristate "Rockchip specific extensions for Innosilicon HDMI" + depends on DRM_ROCKCHIP + help + This selects support for Rockchip SoC specific extensions + for the Innosilicon HDMI driver. If you want to enable + HDMI on RK3036 based SoC, you should select this option. diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile index f6a809afceec..df8fbef17791 100644 --- a/drivers/gpu/drm/rockchip/Makefile +++ b/drivers/gpu/drm/rockchip/Makefile @@ -8,5 +8,6 @@ rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o +obj-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_vop_reg.o diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c index f8f8f29fb7c3..7975158064e8 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c @@ -875,17 +875,10 @@ static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder) clk_disable_unprepare(dsi->pclk); } -static bool dw_mipi_dsi_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void dw_mipi_dsi_encoder_commit(struct drm_encoder *encoder) { struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder); - int mux = rockchip_drm_encoder_get_mux_id(dsi->dev->of_node, encoder); + int mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node, encoder); u32 interface_pix_fmt; u32 val; @@ -931,7 +924,6 @@ static void dw_mipi_dsi_encoder_commit(struct drm_encoder *encoder) static struct drm_encoder_helper_funcs dw_mipi_dsi_encoder_helper_funcs = { - .mode_fixup = dw_mipi_dsi_encoder_mode_fixup, .commit = dw_mipi_dsi_encoder_commit, .mode_set = dw_mipi_dsi_encoder_mode_set, .disable = dw_mipi_dsi_encoder_disable, diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index c65ce8cb30d3..3d3cf2f8891e 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -204,7 +204,7 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder) rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA, ROCKCHIP_OUT_MODE_AAAA); - mux = rockchip_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder); + mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder); if (mux) val = HDMI_SEL_VOP_LIT | (HDMI_SEL_VOP_LIT << 16); else diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c new file mode 100644 index 000000000000..10d62fff22f1 --- /dev/null +++ 
b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -0,0 +1,938 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Zheng Yang <zhengyang@rock-chips.com> + * Yakir Yang <ykk@rock-chips.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/irq.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/hdmi.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_device.h> + +#include <drm/drm_of.h> +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_edid.h> + +#include "rockchip_drm_drv.h" +#include "rockchip_drm_vop.h" + +#include "inno_hdmi.h" + +#define to_inno_hdmi(x) container_of(x, struct inno_hdmi, x) + +struct hdmi_data_info { + int vic; + bool sink_is_hdmi; + bool sink_has_audio; + unsigned int enc_in_format; + unsigned int enc_out_format; + unsigned int colorimetry; +}; + +struct inno_hdmi_i2c { + struct i2c_adapter adap; + + u8 ddc_addr; + u8 segment_addr; + + struct mutex lock; + struct completion cmp; +}; + +struct inno_hdmi { + struct device *dev; + struct drm_device *drm_dev; + + int irq; + struct clk *pclk; + void __iomem *regs; + + struct drm_connector connector; + struct drm_encoder encoder; + + struct inno_hdmi_i2c *i2c; + struct i2c_adapter *ddc; + + unsigned int tmds_rate; + + struct hdmi_data_info hdmi_data; + struct drm_display_mode previous_mode; +}; + +enum { + CSC_ITU601_16_235_TO_RGB_0_255_8BIT, + CSC_ITU601_0_255_TO_RGB_0_255_8BIT, + CSC_ITU709_16_235_TO_RGB_0_255_8BIT, + CSC_RGB_0_255_TO_ITU601_16_235_8BIT, + CSC_RGB_0_255_TO_ITU709_16_235_8BIT, + CSC_RGB_0_255_TO_RGB_16_235_8BIT, +}; + +static const char coeff_csc[][24] = { + /* + * YUV2RGB:601 SD mode(Y[16:235], UV[16:240], RGB[0:255]): + * R = 1.164*Y + 1.596*V - 204 + * G = 1.164*Y - 0.391*U - 0.813*V + 154 + * B = 1.164*Y + 2.018*U - 258 + */ + { + 0x04, 0xa7, 0x00, 0x00, 0x06, 0x62, 0x02, 0xcc, + 0x04, 0xa7, 0x11, 0x90, 0x13, 0x40, 0x00, 0x9a, + 0x04, 0xa7, 0x08, 0x12, 0x00, 0x00, 0x03, 0x02 + }, + /* + * YUV2RGB:601 SD mode(YUV[0:255],RGB[0:255]): + * R = Y + 1.402*V - 248 + * G = Y - 0.344*U - 0.714*V + 135 + * B = Y + 1.772*U - 227 + */ + { + 0x04, 0x00, 0x00, 0x00, 0x05, 0x9b, 0x02, 0xf8, + 0x04, 0x00, 0x11, 0x60, 0x12, 0xdb, 0x00, 0x87, + 0x04, 0x00, 0x07, 0x16, 0x00, 0x00, 0x02, 0xe3 + }, + /* + * YUV2RGB:709 HD mode(Y[16:235],UV[16:240],RGB[0:255]): + * R = 1.164*Y + 1.793*V - 248 + * G = 1.164*Y - 0.213*U - 0.534*V + 77 + * B = 1.164*Y + 2.115*U - 289 + */ + { + 0x04, 0xa7, 0x00, 0x00, 0x07, 0x2c, 0x02, 0xf8, + 0x04, 0xa7, 0x10, 0xda, 0x12, 0x22, 0x00, 0x4d, + 0x04, 0xa7, 0x08, 0x74, 0x00, 0x00, 0x03, 0x21 + }, + + /* + * RGB2YUV:601 SD mode: + * Cb = -0.291G - 0.148R + 0.439B + 128 + * Y = 0.504G + 0.257R + 0.098B + 16 + * Cr = -0.368G + 0.439R - 0.071B + 128 + */ + { + 0x11, 0x5f, 0x01, 0x82, 0x10, 0x23, 0x00, 0x80, + 0x02, 0x1c, 0x00, 0xa1, 0x00, 0x36, 0x00, 0x1e, + 0x11, 0x29, 0x10, 0x59, 0x01, 0x82, 0x00, 0x80 + }, + /* + * RGB2YUV:709 HD mode: + * Cb = - 0.338G - 0.101R + 0.439B + 128 + * Y = 0.614G + 0.183R + 
0.062B + 16 + * Cr = - 0.399G + 0.439R - 0.040B + 128 + */ + { + 0x11, 0x98, 0x01, 0xc1, 0x10, 0x28, 0x00, 0x80, + 0x02, 0x74, 0x00, 0xbb, 0x00, 0x3f, 0x00, 0x10, + 0x11, 0x5a, 0x10, 0x67, 0x01, 0xc1, 0x00, 0x80 + }, + /* + * RGB[0:255]2RGB[16:235]: + * R' = R x (235-16)/255 + 16; + * G' = G x (235-16)/255 + 16; + * B' = B x (235-16)/255 + 16; + */ + { + 0x00, 0x00, 0x03, 0x6F, 0x00, 0x00, 0x00, 0x10, + 0x03, 0x6F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x6F, 0x00, 0x10 + }, +}; + +static inline u8 hdmi_readb(struct inno_hdmi *hdmi, u16 offset) +{ + return readl_relaxed(hdmi->regs + (offset) * 0x04); +} + +static inline void hdmi_writeb(struct inno_hdmi *hdmi, u16 offset, u32 val) +{ + writel_relaxed(val, hdmi->regs + (offset) * 0x04); +} + +static inline void hdmi_modb(struct inno_hdmi *hdmi, u16 offset, + u32 msk, u32 val) +{ + u8 temp = hdmi_readb(hdmi, offset) & ~msk; + + temp |= val & msk; + hdmi_writeb(hdmi, offset, temp); +} + +static void inno_hdmi_i2c_init(struct inno_hdmi *hdmi) +{ + int ddc_bus_freq; + + ddc_bus_freq = (hdmi->tmds_rate >> 2) / HDMI_SCL_RATE; + + hdmi_writeb(hdmi, DDC_BUS_FREQ_L, ddc_bus_freq & 0xFF); + hdmi_writeb(hdmi, DDC_BUS_FREQ_H, (ddc_bus_freq >> 8) & 0xFF); + + /* Clear the EDID interrupt flag and mute the interrupt */ + hdmi_writeb(hdmi, HDMI_INTERRUPT_MASK1, 0); + hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY); +} + +static void inno_hdmi_sys_power(struct inno_hdmi *hdmi, bool enable) +{ + if (enable) + hdmi_modb(hdmi, HDMI_SYS_CTRL, m_POWER, v_PWR_ON); + else + hdmi_modb(hdmi, HDMI_SYS_CTRL, m_POWER, v_PWR_OFF); +} + +static void inno_hdmi_set_pwr_mode(struct inno_hdmi *hdmi, int mode) +{ + switch (mode) { + case NORMAL: + inno_hdmi_sys_power(hdmi, false); + + hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x6f); + hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0xbb); + + hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15); + hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x14); + hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x10); + hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x0f); + hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x00); + hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x01); + + inno_hdmi_sys_power(hdmi, true); + break; + + case LOWER_PWR: + inno_hdmi_sys_power(hdmi, false); + hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0x00); + hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x00); + hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x00); + hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15); + + break; + + default: + dev_err(hdmi->dev, "Unknown power mode %d\n", mode); + } +} + +static void inno_hdmi_reset(struct inno_hdmi *hdmi) +{ + u32 val; + u32 msk; + + hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_DIGITAL, v_NOT_RST_DIGITAL); + udelay(100); + + hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_ANALOG, v_NOT_RST_ANALOG); + udelay(100); + + msk = m_REG_CLK_INV | m_REG_CLK_SOURCE | m_POWER | m_INT_POL; + val = v_REG_CLK_INV | v_REG_CLK_SOURCE_SYS | v_PWR_ON | v_INT_POL_HIGH; + hdmi_modb(hdmi, HDMI_SYS_CTRL, msk, val); + + inno_hdmi_set_pwr_mode(hdmi, NORMAL); +} + +static int inno_hdmi_upload_frame(struct inno_hdmi *hdmi, int setup_rc, + union hdmi_infoframe *frame, u32 frame_index, + u32 mask, u32 disable, u32 enable) +{ + if (mask) + hdmi_modb(hdmi, HDMI_PACKET_SEND_AUTO, mask, disable); + + hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_BUF_INDEX, frame_index); + + if (setup_rc >= 0) { + u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE]; + ssize_t rc, i; + + rc = hdmi_infoframe_pack(frame, packed_frame, + sizeof(packed_frame)); + if (rc < 0) + return rc; + + for (i = 0; i < rc; i++) + hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_ADDR + i, + 
packed_frame[i]); + + if (mask) + hdmi_modb(hdmi, HDMI_PACKET_SEND_AUTO, mask, enable); + } + + return setup_rc; +} + +static int inno_hdmi_config_video_vsi(struct inno_hdmi *hdmi, + struct drm_display_mode *mode) +{ + union hdmi_infoframe frame; + int rc; + + rc = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi, + mode); + + return inno_hdmi_upload_frame(hdmi, rc, &frame, INFOFRAME_VSI, + m_PACKET_VSI_EN, v_PACKET_VSI_EN(0), v_PACKET_VSI_EN(1)); +} + +static int inno_hdmi_config_video_avi(struct inno_hdmi *hdmi, + struct drm_display_mode *mode) +{ + union hdmi_infoframe frame; + int rc; + + rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode); + + if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444) + frame.avi.colorspace = HDMI_COLORSPACE_YUV444; + else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422) + frame.avi.colorspace = HDMI_COLORSPACE_YUV422; + else + frame.avi.colorspace = HDMI_COLORSPACE_RGB; + + return inno_hdmi_upload_frame(hdmi, rc, &frame, INFOFRAME_AVI, 0, 0, 0); +} + +static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi) +{ + struct hdmi_data_info *data = &hdmi->hdmi_data; + int c0_c2_change = 0; + int csc_enable = 0; + int csc_mode = 0; + int auto_csc = 0; + int value; + int i; + + /* Input video mode is SDR RGB24bit, data enable signal from external */ + hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL1, v_DE_EXTERNAL | + v_VIDEO_INPUT_FORMAT(VIDEO_INPUT_SDR_RGB444)); + + /* Input color hardcode to RGB, and output color hardcode to RGB888 */ + value = v_VIDEO_INPUT_BITS(VIDEO_INPUT_8BITS) | + v_VIDEO_OUTPUT_COLOR(0) | + v_VIDEO_INPUT_CSP(0); + hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL2, value); + + if (data->enc_in_format == data->enc_out_format) { + if ((data->enc_in_format == HDMI_COLORSPACE_RGB) || + (data->enc_in_format >= HDMI_COLORSPACE_YUV444)) { + value = v_SOF_DISABLE | v_COLOR_DEPTH_NOT_INDICATED(1); + hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL3, value); + + hdmi_modb(hdmi, HDMI_VIDEO_CONTRL, + m_VIDEO_AUTO_CSC | m_VIDEO_C0_C2_SWAP, + v_VIDEO_AUTO_CSC(AUTO_CSC_DISABLE) | + v_VIDEO_C0_C2_SWAP(C0_C2_CHANGE_DISABLE)); + return 0; + } + } + + if (data->colorimetry == HDMI_COLORIMETRY_ITU_601) { + if ((data->enc_in_format == HDMI_COLORSPACE_RGB) && + (data->enc_out_format == HDMI_COLORSPACE_YUV444)) { + csc_mode = CSC_RGB_0_255_TO_ITU601_16_235_8BIT; + auto_csc = AUTO_CSC_DISABLE; + c0_c2_change = C0_C2_CHANGE_DISABLE; + csc_enable = v_CSC_ENABLE; + } else if ((data->enc_in_format == HDMI_COLORSPACE_YUV444) && + (data->enc_out_format == HDMI_COLORSPACE_RGB)) { + csc_mode = CSC_ITU601_16_235_TO_RGB_0_255_8BIT; + auto_csc = AUTO_CSC_ENABLE; + c0_c2_change = C0_C2_CHANGE_DISABLE; + csc_enable = v_CSC_DISABLE; + } + } else { + if ((data->enc_in_format == HDMI_COLORSPACE_RGB) && + (data->enc_out_format == HDMI_COLORSPACE_YUV444)) { + csc_mode = CSC_RGB_0_255_TO_ITU709_16_235_8BIT; + auto_csc = AUTO_CSC_DISABLE; + c0_c2_change = C0_C2_CHANGE_DISABLE; + csc_enable = v_CSC_ENABLE; + } else if ((data->enc_in_format == HDMI_COLORSPACE_YUV444) && + (data->enc_out_format == HDMI_COLORSPACE_RGB)) { + csc_mode = CSC_ITU709_16_235_TO_RGB_0_255_8BIT; + auto_csc = AUTO_CSC_ENABLE; + c0_c2_change = C0_C2_CHANGE_DISABLE; + csc_enable = v_CSC_DISABLE; + } + } + + for (i = 0; i < 24; i++) + hdmi_writeb(hdmi, HDMI_VIDEO_CSC_COEF + i, + coeff_csc[csc_mode][i]); + + value = v_SOF_DISABLE | csc_enable | v_COLOR_DEPTH_NOT_INDICATED(1); + hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL3, value); + hdmi_modb(hdmi, HDMI_VIDEO_CONTRL, m_VIDEO_AUTO_CSC | + m_VIDEO_C0_C2_SWAP, 
v_VIDEO_AUTO_CSC(auto_csc) | + v_VIDEO_C0_C2_SWAP(c0_c2_change)); + + return 0; +} + +static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi, + struct drm_display_mode *mode) +{ + int value; + + /* Set detail external video timing polarity and interlace mode */ + value = v_EXTERANL_VIDEO(1); + value |= mode->flags & DRM_MODE_FLAG_PHSYNC ? + v_HSYNC_POLARITY(1) : v_HSYNC_POLARITY(0); + value |= mode->flags & DRM_MODE_FLAG_PVSYNC ? + v_VSYNC_POLARITY(1) : v_VSYNC_POLARITY(0); + value |= mode->flags & DRM_MODE_FLAG_INTERLACE ? + v_INETLACE(1) : v_INETLACE(0); + hdmi_writeb(hdmi, HDMI_VIDEO_TIMING_CTL, value); + + /* Set detail external video timing */ + value = mode->htotal; + hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HTOTAL_L, value & 0xFF); + hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HTOTAL_H, (value >> 8) & 0xFF); + + value = mode->htotal - mode->hdisplay; + hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_L, value & 0xFF); + hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_H, (value >> 8) & 0xFF); + + value = mode->hsync_start - mode->hdisplay; + hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_L, value & 0xFF); + hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_H, (value >> 8) & 0xFF); + + value = mode->hsync_end - mode->hsync_start; + hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDURATION_L, value & 0xFF); + hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDURATION_H, (value >> 8) & 0xFF); + + value = mode->vtotal; + hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VTOTAL_L, value & 0xFF); + hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VTOTAL_H, (value >> 8) & 0xFF); + + value = mode->vtotal - mode->vdisplay; + hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VBLANK, value & 0xFF); + + value = mode->vsync_start - mode->vdisplay; + hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDELAY, value & 0xFF); + + value = mode->vsync_end - mode->vsync_start; + hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDURATION, value & 0xFF); + + hdmi_writeb(hdmi, HDMI_PHY_PRE_DIV_RATIO, 0x1e); + hdmi_writeb(hdmi, HDMI_PHY_FEEDBACK_DIV_RATIO_LOW, 0x2c); + hdmi_writeb(hdmi, HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH, 0x01); + + return 0; +} + +static int inno_hdmi_setup(struct inno_hdmi *hdmi, + struct drm_display_mode *mode) +{ + hdmi->hdmi_data.vic = drm_match_cea_mode(mode); + + hdmi->hdmi_data.enc_in_format = HDMI_COLORSPACE_RGB; + hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB; + + if ((hdmi->hdmi_data.vic == 6) || (hdmi->hdmi_data.vic == 7) || + (hdmi->hdmi_data.vic == 21) || (hdmi->hdmi_data.vic == 22) || + (hdmi->hdmi_data.vic == 2) || (hdmi->hdmi_data.vic == 3) || + (hdmi->hdmi_data.vic == 17) || (hdmi->hdmi_data.vic == 18)) + hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601; + else + hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709; + + /* Mute video and audio output */ + hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK, + v_AUDIO_MUTE(1) | v_VIDEO_MUTE(1)); + + /* Set HDMI Mode */ + hdmi_writeb(hdmi, HDMI_HDCP_CTRL, + v_HDMI_DVI(hdmi->hdmi_data.sink_is_hdmi)); + + inno_hdmi_config_video_timing(hdmi, mode); + + inno_hdmi_config_video_csc(hdmi); + + if (hdmi->hdmi_data.sink_is_hdmi) { + inno_hdmi_config_video_avi(hdmi, mode); + inno_hdmi_config_video_vsi(hdmi, mode); + } + + /* + * When IP controller have configured to an accurate video + * timing, then the TMDS clock source would be switched to + * DCLK_LCDC, so we need to init the TMDS rate to mode pixel + * clock rate, and reconfigure the DDC clock. 
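 *
 * For example, with a 148.5 MHz 1080p mode (mode->clock == 148500) the
 * tmds_rate set below becomes 148500000, so inno_hdmi_i2c_init() programs
 * DDC_BUS_FREQ to (148500000 >> 2) / HDMI_SCL_RATE = 371, keeping the DDC
 * SCL rate close to the 100 kHz target (worked example derived from the
 * code above, added for illustration).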
+ */ + hdmi->tmds_rate = mode->clock * 1000; + inno_hdmi_i2c_init(hdmi); + + /* Unmute video and audio output */ + hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK, + v_AUDIO_MUTE(0) | v_VIDEO_MUTE(0)); + + return 0; +} + +static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adj_mode) +{ + struct inno_hdmi *hdmi = to_inno_hdmi(encoder); + + inno_hdmi_setup(hdmi, adj_mode); + + /* Store the display mode for plugin/DPMS poweron events */ + memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode)); +} + +static void inno_hdmi_encoder_enable(struct drm_encoder *encoder) +{ + struct inno_hdmi *hdmi = to_inno_hdmi(encoder); + + rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA, + ROCKCHIP_OUT_MODE_P888); + + inno_hdmi_set_pwr_mode(hdmi, NORMAL); +} + +static void inno_hdmi_encoder_disable(struct drm_encoder *encoder) +{ + struct inno_hdmi *hdmi = to_inno_hdmi(encoder); + + inno_hdmi_set_pwr_mode(hdmi, LOWER_PWR); +} + +static bool inno_hdmi_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adj_mode) +{ + return true; +} + +static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = { + .enable = inno_hdmi_encoder_enable, + .disable = inno_hdmi_encoder_disable, + .mode_fixup = inno_hdmi_encoder_mode_fixup, + .mode_set = inno_hdmi_encoder_mode_set, +}; + +static struct drm_encoder_funcs inno_hdmi_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static enum drm_connector_status +inno_hdmi_connector_detect(struct drm_connector *connector, bool force) +{ + struct inno_hdmi *hdmi = to_inno_hdmi(connector); + + return (hdmi_readb(hdmi, HDMI_STATUS) & m_HOTPLUG) ? + connector_status_connected : connector_status_disconnected; +} + +static int inno_hdmi_connector_get_modes(struct drm_connector *connector) +{ + struct inno_hdmi *hdmi = to_inno_hdmi(connector); + struct edid *edid; + int ret = 0; + + if (!hdmi->ddc) + return 0; + + edid = drm_get_edid(connector, hdmi->ddc); + if (edid) { + hdmi->hdmi_data.sink_is_hdmi = drm_detect_hdmi_monitor(edid); + hdmi->hdmi_data.sink_has_audio = drm_detect_monitor_audio(edid); + drm_mode_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + kfree(edid); + } + + return ret; +} + +static enum drm_mode_status +inno_hdmi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static struct drm_encoder * +inno_hdmi_connector_best_encoder(struct drm_connector *connector) +{ + struct inno_hdmi *hdmi = to_inno_hdmi(connector); + + return &hdmi->encoder; +} + +static int +inno_hdmi_probe_single_connector_modes(struct drm_connector *connector, + uint32_t maxX, uint32_t maxY) +{ + return drm_helper_probe_single_connector_modes(connector, 1920, 1080); +} + +static void inno_hdmi_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + +static struct drm_connector_funcs inno_hdmi_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .fill_modes = inno_hdmi_probe_single_connector_modes, + .detect = inno_hdmi_connector_detect, + .destroy = inno_hdmi_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static struct drm_connector_helper_funcs 
inno_hdmi_connector_helper_funcs = { + .get_modes = inno_hdmi_connector_get_modes, + .mode_valid = inno_hdmi_connector_mode_valid, + .best_encoder = inno_hdmi_connector_best_encoder, +}; + +static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi) +{ + struct drm_encoder *encoder = &hdmi->encoder; + struct device *dev = hdmi->dev; + + encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); + + /* + * If we failed to find the CRTC(s) which this encoder is + * supposed to be connected to, it's because the CRTC has + * not been registered yet. Defer probing, and hope that + * the required CRTC is added later. + */ + if (encoder->possible_crtcs == 0) + return -EPROBE_DEFER; + + drm_encoder_helper_add(encoder, &inno_hdmi_encoder_helper_funcs); + drm_encoder_init(drm, encoder, &inno_hdmi_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + + hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD; + + drm_connector_helper_add(&hdmi->connector, + &inno_hdmi_connector_helper_funcs); + drm_connector_init(drm, &hdmi->connector, &inno_hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + + drm_mode_connector_attach_encoder(&hdmi->connector, encoder); + + return 0; +} + +static irqreturn_t inno_hdmi_i2c_irq(struct inno_hdmi *hdmi) +{ + struct inno_hdmi_i2c *i2c = hdmi->i2c; + u8 stat; + + stat = hdmi_readb(hdmi, HDMI_INTERRUPT_STATUS1); + if (!(stat & m_INT_EDID_READY)) + return IRQ_NONE; + + /* Clear HDMI EDID interrupt flag */ + hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY); + + complete(&i2c->cmp); + + return IRQ_HANDLED; +} + +static irqreturn_t inno_hdmi_hardirq(int irq, void *dev_id) +{ + struct inno_hdmi *hdmi = dev_id; + irqreturn_t ret = IRQ_NONE; + u8 interrupt; + + if (hdmi->i2c) + ret = inno_hdmi_i2c_irq(hdmi); + + interrupt = hdmi_readb(hdmi, HDMI_STATUS); + if (interrupt & m_INT_HOTPLUG) { + hdmi_modb(hdmi, HDMI_STATUS, m_INT_HOTPLUG, m_INT_HOTPLUG); + ret = IRQ_WAKE_THREAD; + } + + return ret; +} + +static irqreturn_t inno_hdmi_irq(int irq, void *dev_id) +{ + struct inno_hdmi *hdmi = dev_id; + + drm_helper_hpd_irq_event(hdmi->connector.dev); + + return IRQ_HANDLED; +} + +static int inno_hdmi_i2c_read(struct inno_hdmi *hdmi, struct i2c_msg *msgs) +{ + int length = msgs->len; + u8 *buf = msgs->buf; + int ret; + + ret = wait_for_completion_timeout(&hdmi->i2c->cmp, HZ / 10); + if (!ret) + return -EAGAIN; + + while (length--) + *buf++ = hdmi_readb(hdmi, HDMI_EDID_FIFO_ADDR); + + return 0; +} + +static int inno_hdmi_i2c_write(struct inno_hdmi *hdmi, struct i2c_msg *msgs) +{ + /* + * The DDC module only support read EDID message, so + * we assume that each word write to this i2c adapter + * should be the offset of EDID word address. 
+ */ + if ((msgs->len != 1) || + ((msgs->addr != DDC_ADDR) && (msgs->addr != DDC_SEGMENT_ADDR))) + return -EINVAL; + + reinit_completion(&hdmi->i2c->cmp); + + if (msgs->addr == DDC_SEGMENT_ADDR) + hdmi->i2c->segment_addr = msgs->buf[0]; + if (msgs->addr == DDC_ADDR) + hdmi->i2c->ddc_addr = msgs->buf[0]; + + /* Set edid fifo first addr */ + hdmi_writeb(hdmi, HDMI_EDID_FIFO_OFFSET, 0x00); + + /* Set edid word address 0x00/0x80 */ + hdmi_writeb(hdmi, HDMI_EDID_WORD_ADDR, hdmi->i2c->ddc_addr); + + /* Set edid segment pointer */ + hdmi_writeb(hdmi, HDMI_EDID_SEGMENT_POINTER, hdmi->i2c->segment_addr); + + return 0; +} + +static int inno_hdmi_i2c_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct inno_hdmi *hdmi = i2c_get_adapdata(adap); + struct inno_hdmi_i2c *i2c = hdmi->i2c; + int i, ret = 0; + + mutex_lock(&i2c->lock); + + /* Clear the EDID interrupt flag and unmute the interrupt */ + hdmi_writeb(hdmi, HDMI_INTERRUPT_MASK1, m_INT_EDID_READY); + hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY); + + for (i = 0; i < num; i++) { + dev_dbg(hdmi->dev, "xfer: num: %d/%d, len: %d, flags: %#x\n", + i + 1, num, msgs[i].len, msgs[i].flags); + + if (msgs[i].flags & I2C_M_RD) + ret = inno_hdmi_i2c_read(hdmi, &msgs[i]); + else + ret = inno_hdmi_i2c_write(hdmi, &msgs[i]); + + if (ret < 0) + break; + } + + if (!ret) + ret = num; + + /* Mute HDMI EDID interrupt */ + hdmi_writeb(hdmi, HDMI_INTERRUPT_MASK1, 0); + + mutex_unlock(&i2c->lock); + + return ret; +} + +static u32 inno_hdmi_i2c_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm inno_hdmi_algorithm = { + .master_xfer = inno_hdmi_i2c_xfer, + .functionality = inno_hdmi_i2c_func, +}; + +static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi) +{ + struct i2c_adapter *adap; + struct inno_hdmi_i2c *i2c; + int ret; + + i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return ERR_PTR(-ENOMEM); + + mutex_init(&i2c->lock); + init_completion(&i2c->cmp); + + adap = &i2c->adap; + adap->class = I2C_CLASS_DDC; + adap->owner = THIS_MODULE; + adap->dev.parent = hdmi->dev; + adap->dev.of_node = hdmi->dev->of_node; + adap->algo = &inno_hdmi_algorithm; + strlcpy(adap->name, "Inno HDMI", sizeof(adap->name)); + i2c_set_adapdata(adap, hdmi); + + ret = i2c_add_adapter(adap); + if (ret) { + dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name); + devm_kfree(hdmi->dev, i2c); + return ERR_PTR(ret); + } + + hdmi->i2c = i2c; + + dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name); + + return adap; +} + +static int inno_hdmi_bind(struct device *dev, struct device *master, + void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *drm = data; + struct inno_hdmi *hdmi; + struct resource *iores; + int irq; + int ret; + + hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return -ENOMEM; + + hdmi->dev = dev; + hdmi->drm_dev = drm; + + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!iores) + return -ENXIO; + + hdmi->regs = devm_ioremap_resource(dev, iores); + if (IS_ERR(hdmi->regs)) + return PTR_ERR(hdmi->regs); + + hdmi->pclk = devm_clk_get(hdmi->dev, "pclk"); + if (IS_ERR(hdmi->pclk)) { + dev_err(hdmi->dev, "Unable to get HDMI pclk clk\n"); + return PTR_ERR(hdmi->pclk); + } + + ret = clk_prepare_enable(hdmi->pclk); + if (ret) { + dev_err(hdmi->dev, "Cannot enable HDMI pclk clock: %d\n", ret); + return ret; + } + + irq = platform_get_irq(pdev, 0); + if 
(irq < 0) + return irq; + + inno_hdmi_reset(hdmi); + + hdmi->ddc = inno_hdmi_i2c_adapter(hdmi); + if (IS_ERR(hdmi->ddc)) { + hdmi->ddc = NULL; + return PTR_ERR(hdmi->ddc); + } + + /* + * When IP controller haven't configured to an accurate video + * timing, then the TMDS clock source would be switched to + * PCLK_HDMI, so we need to init the TMDS rate to PCLK rate, + * and reconfigure the DDC clock. + */ + hdmi->tmds_rate = clk_get_rate(hdmi->pclk); + inno_hdmi_i2c_init(hdmi); + + ret = inno_hdmi_register(drm, hdmi); + if (ret) + return ret; + + dev_set_drvdata(dev, hdmi); + + /* Unmute hotplug interrupt */ + hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1)); + + ret = devm_request_threaded_irq(dev, irq, inno_hdmi_hardirq, + inno_hdmi_irq, IRQF_SHARED, + dev_name(dev), hdmi); + + return ret; +} + +static void inno_hdmi_unbind(struct device *dev, struct device *master, + void *data) +{ + struct inno_hdmi *hdmi = dev_get_drvdata(dev); + + hdmi->connector.funcs->destroy(&hdmi->connector); + hdmi->encoder.funcs->destroy(&hdmi->encoder); + + clk_disable_unprepare(hdmi->pclk); + i2c_put_adapter(hdmi->ddc); +} + +static const struct component_ops inno_hdmi_ops = { + .bind = inno_hdmi_bind, + .unbind = inno_hdmi_unbind, +}; + +static int inno_hdmi_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &inno_hdmi_ops); +} + +static int inno_hdmi_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &inno_hdmi_ops); + + return 0; +} + +static const struct of_device_id inno_hdmi_dt_ids[] = { + { .compatible = "rockchip,rk3036-inno-hdmi", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, inno_hdmi_dt_ids); + +static struct platform_driver inno_hdmi_driver = { + .probe = inno_hdmi_probe, + .remove = inno_hdmi_remove, + .driver = { + .name = "innohdmi-rockchip", + .of_match_table = inno_hdmi_dt_ids, + }, +}; + +module_platform_driver(inno_hdmi_driver); + +MODULE_AUTHOR("Zheng Yang <zhengyang@rock-chips.com>"); +MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>"); +MODULE_DESCRIPTION("Rockchip Specific INNO-HDMI Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:innohdmi-rockchip"); diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.h b/drivers/gpu/drm/rockchip/inno_hdmi.h new file mode 100644 index 000000000000..aa7c415f8cc1 --- /dev/null +++ b/drivers/gpu/drm/rockchip/inno_hdmi.h @@ -0,0 +1,362 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Zheng Yang <zhengyang@rock-chips.com> + * Yakir Yang <ykk@rock-chips.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __INNO_HDMI_H__ +#define __INNO_HDMI_H__ + +#define DDC_SEGMENT_ADDR 0x30 + +enum PWR_MODE { + NORMAL, + LOWER_PWR, +}; + +#define HDMI_SCL_RATE (100*1000) +#define DDC_BUS_FREQ_L 0x4b +#define DDC_BUS_FREQ_H 0x4c + +#define HDMI_SYS_CTRL 0x00 +#define m_RST_ANALOG (1 << 6) +#define v_RST_ANALOG (0 << 6) +#define v_NOT_RST_ANALOG (1 << 6) +#define m_RST_DIGITAL (1 << 5) +#define v_RST_DIGITAL (0 << 5) +#define v_NOT_RST_DIGITAL (1 << 5) +#define m_REG_CLK_INV (1 << 4) +#define v_REG_CLK_NOT_INV (0 << 4) +#define v_REG_CLK_INV (1 << 4) +#define m_VCLK_INV (1 << 3) +#define v_VCLK_NOT_INV (0 << 3) +#define v_VCLK_INV (1 << 3) +#define m_REG_CLK_SOURCE (1 << 2) +#define v_REG_CLK_SOURCE_TMDS (0 << 2) +#define v_REG_CLK_SOURCE_SYS (1 << 2) +#define m_POWER (1 << 1) +#define v_PWR_ON (0 << 1) +#define v_PWR_OFF (1 << 1) +#define m_INT_POL (1 << 0) +#define v_INT_POL_HIGH 1 +#define v_INT_POL_LOW 0 + +#define HDMI_VIDEO_CONTRL1 0x01 +#define m_VIDEO_INPUT_FORMAT (7 << 1) +#define m_DE_SOURCE (1 << 0) +#define v_VIDEO_INPUT_FORMAT(n) (n << 1) +#define v_DE_EXTERNAL 1 +#define v_DE_INTERNAL 0 +enum { + VIDEO_INPUT_SDR_RGB444 = 0, + VIDEO_INPUT_DDR_RGB444 = 5, + VIDEO_INPUT_DDR_YCBCR422 = 6 +}; + +#define HDMI_VIDEO_CONTRL2 0x02 +#define m_VIDEO_OUTPUT_COLOR (3 << 6) +#define m_VIDEO_INPUT_BITS (3 << 4) +#define m_VIDEO_INPUT_CSP (1 << 0) +#define v_VIDEO_OUTPUT_COLOR(n) (((n) & 0x3) << 6) +#define v_VIDEO_INPUT_BITS(n) (n << 4) +#define v_VIDEO_INPUT_CSP(n) (n << 0) +enum { + VIDEO_INPUT_12BITS = 0, + VIDEO_INPUT_10BITS = 1, + VIDEO_INPUT_REVERT = 2, + VIDEO_INPUT_8BITS = 3, +}; + +#define HDMI_VIDEO_CONTRL 0x03 +#define m_VIDEO_AUTO_CSC (1 << 7) +#define v_VIDEO_AUTO_CSC(n) (n << 7) +#define m_VIDEO_C0_C2_SWAP (1 << 0) +#define v_VIDEO_C0_C2_SWAP(n) (n << 0) +enum { + C0_C2_CHANGE_ENABLE = 0, + C0_C2_CHANGE_DISABLE = 1, + AUTO_CSC_DISABLE = 0, + AUTO_CSC_ENABLE = 1, +}; + +#define HDMI_VIDEO_CONTRL3 0x04 +#define m_COLOR_DEPTH_NOT_INDICATED (1 << 4) +#define m_SOF (1 << 3) +#define m_COLOR_RANGE (1 << 2) +#define m_CSC (1 << 0) +#define v_COLOR_DEPTH_NOT_INDICATED(n) ((n) << 4) +#define v_SOF_ENABLE (0 << 3) +#define v_SOF_DISABLE (1 << 3) +#define v_COLOR_RANGE_FULL (1 << 2) +#define v_COLOR_RANGE_LIMITED (0 << 2) +#define v_CSC_ENABLE 1 +#define v_CSC_DISABLE 0 + +#define HDMI_AV_MUTE 0x05 +#define m_AVMUTE_CLEAR (1 << 7) +#define m_AVMUTE_ENABLE (1 << 6) +#define m_AUDIO_MUTE (1 << 1) +#define m_VIDEO_BLACK (1 << 0) +#define v_AVMUTE_CLEAR(n) (n << 7) +#define v_AVMUTE_ENABLE(n) (n << 6) +#define v_AUDIO_MUTE(n) (n << 1) +#define v_VIDEO_MUTE(n) (n << 0) + +#define HDMI_VIDEO_TIMING_CTL 0x08 +#define v_HSYNC_POLARITY(n) (n << 3) +#define v_VSYNC_POLARITY(n) (n << 2) +#define v_INETLACE(n) (n << 1) +#define v_EXTERANL_VIDEO(n) (n << 0) + +#define HDMI_VIDEO_EXT_HTOTAL_L 0x09 +#define HDMI_VIDEO_EXT_HTOTAL_H 0x0a +#define HDMI_VIDEO_EXT_HBLANK_L 0x0b +#define HDMI_VIDEO_EXT_HBLANK_H 0x0c +#define HDMI_VIDEO_EXT_HDELAY_L 0x0d +#define HDMI_VIDEO_EXT_HDELAY_H 0x0e +#define HDMI_VIDEO_EXT_HDURATION_L 0x0f +#define HDMI_VIDEO_EXT_HDURATION_H 0x10 +#define HDMI_VIDEO_EXT_VTOTAL_L 0x11 +#define HDMI_VIDEO_EXT_VTOTAL_H 0x12 +#define HDMI_VIDEO_EXT_VBLANK 0x13 +#define HDMI_VIDEO_EXT_VDELAY 0x14 +#define HDMI_VIDEO_EXT_VDURATION 0x15 + +#define HDMI_VIDEO_CSC_COEF 0x18 + +#define HDMI_AUDIO_CTRL1 0x35 +enum { + CTS_SOURCE_INTERNAL = 0, + CTS_SOURCE_EXTERNAL = 1, +}; +#define v_CTS_SOURCE(n) (n << 7) + +enum { + DOWNSAMPLE_DISABLE = 0, + DOWNSAMPLE_1_2 = 1, + DOWNSAMPLE_1_4 = 2, +}; 
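The m_* mask and v_*() value macros defined throughout this header are meant to be used in pairs through hdmi_modb() in inno_hdmi.c, which read-modify-writes only the masked bits of a register. A minimal usage sketch, mirroring the mute/unmute calls already made by inno_hdmi_setup() (illustration only, not part of the patch):

	/* Update only the audio/video mute field of HDMI_AV_MUTE: the m_*
	 * macros select the bits, the v_*() macros encode the new value.
	 */
	hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK,
		  v_AUDIO_MUTE(1) | v_VIDEO_MUTE(1));	/* mute output */
	hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK,
		  v_AUDIO_MUTE(0) | v_VIDEO_MUTE(0));	/* unmute output */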
+#define v_DOWN_SAMPLE(n) (n << 5) + +enum { + AUDIO_SOURCE_IIS = 0, + AUDIO_SOURCE_SPDIF = 1, +}; +#define v_AUDIO_SOURCE(n) (n << 3) + +#define v_MCLK_ENABLE(n) (n << 2) +enum { + MCLK_128FS = 0, + MCLK_256FS = 1, + MCLK_384FS = 2, + MCLK_512FS = 3, +}; +#define v_MCLK_RATIO(n) (n) + +#define AUDIO_SAMPLE_RATE 0x37 +enum { + AUDIO_32K = 0x3, + AUDIO_441K = 0x0, + AUDIO_48K = 0x2, + AUDIO_882K = 0x8, + AUDIO_96K = 0xa, + AUDIO_1764K = 0xc, + AUDIO_192K = 0xe, +}; + +#define AUDIO_I2S_MODE 0x38 +enum { + I2S_CHANNEL_1_2 = 1, + I2S_CHANNEL_3_4 = 3, + I2S_CHANNEL_5_6 = 7, + I2S_CHANNEL_7_8 = 0xf +}; +#define v_I2S_CHANNEL(n) ((n) << 2) +enum { + I2S_STANDARD = 0, + I2S_LEFT_JUSTIFIED = 1, + I2S_RIGHT_JUSTIFIED = 2, +}; +#define v_I2S_MODE(n) (n) + +#define AUDIO_I2S_MAP 0x39 +#define AUDIO_I2S_SWAPS_SPDIF 0x3a +#define v_SPIDF_FREQ(n) (n) + +#define N_32K 0x1000 +#define N_441K 0x1880 +#define N_882K 0x3100 +#define N_1764K 0x6200 +#define N_48K 0x1800 +#define N_96K 0x3000 +#define N_192K 0x6000 + +#define HDMI_AUDIO_CHANNEL_STATUS 0x3e +#define m_AUDIO_STATUS_NLPCM (1 << 7) +#define m_AUDIO_STATUS_USE (1 << 6) +#define m_AUDIO_STATUS_COPYRIGHT (1 << 5) +#define m_AUDIO_STATUS_ADDITION (3 << 2) +#define m_AUDIO_STATUS_CLK_ACCURACY (2 << 0) +#define v_AUDIO_STATUS_NLPCM(n) ((n & 1) << 7) +#define AUDIO_N_H 0x3f +#define AUDIO_N_M 0x40 +#define AUDIO_N_L 0x41 + +#define HDMI_AUDIO_CTS_H 0x45 +#define HDMI_AUDIO_CTS_M 0x46 +#define HDMI_AUDIO_CTS_L 0x47 + +#define HDMI_DDC_CLK_L 0x4b +#define HDMI_DDC_CLK_H 0x4c + +#define HDMI_EDID_SEGMENT_POINTER 0x4d +#define HDMI_EDID_WORD_ADDR 0x4e +#define HDMI_EDID_FIFO_OFFSET 0x4f +#define HDMI_EDID_FIFO_ADDR 0x50 + +#define HDMI_PACKET_SEND_MANUAL 0x9c +#define HDMI_PACKET_SEND_AUTO 0x9d +#define m_PACKET_GCP_EN (1 << 7) +#define m_PACKET_MSI_EN (1 << 6) +#define m_PACKET_SDI_EN (1 << 5) +#define m_PACKET_VSI_EN (1 << 4) +#define v_PACKET_GCP_EN(n) ((n & 1) << 7) +#define v_PACKET_MSI_EN(n) ((n & 1) << 6) +#define v_PACKET_SDI_EN(n) ((n & 1) << 5) +#define v_PACKET_VSI_EN(n) ((n & 1) << 4) + +#define HDMI_CONTROL_PACKET_BUF_INDEX 0x9f +enum { + INFOFRAME_VSI = 0x05, + INFOFRAME_AVI = 0x06, + INFOFRAME_AAI = 0x08, +}; + +#define HDMI_CONTROL_PACKET_ADDR 0xa0 +#define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11 +enum { + AVI_COLOR_MODE_RGB = 0, + AVI_COLOR_MODE_YCBCR422 = 1, + AVI_COLOR_MODE_YCBCR444 = 2, + AVI_COLORIMETRY_NO_DATA = 0, + + AVI_COLORIMETRY_SMPTE_170M = 1, + AVI_COLORIMETRY_ITU709 = 2, + AVI_COLORIMETRY_EXTENDED = 3, + + AVI_CODED_FRAME_ASPECT_NO_DATA = 0, + AVI_CODED_FRAME_ASPECT_4_3 = 1, + AVI_CODED_FRAME_ASPECT_16_9 = 2, + + ACTIVE_ASPECT_RATE_SAME_AS_CODED_FRAME = 0x08, + ACTIVE_ASPECT_RATE_4_3 = 0x09, + ACTIVE_ASPECT_RATE_16_9 = 0x0A, + ACTIVE_ASPECT_RATE_14_9 = 0x0B, +}; + +#define HDMI_HDCP_CTRL 0x52 +#define m_HDMI_DVI (1 << 1) +#define v_HDMI_DVI(n) (n << 1) + +#define HDMI_INTERRUPT_MASK1 0xc0 +#define HDMI_INTERRUPT_STATUS1 0xc1 +#define m_INT_ACTIVE_VSYNC (1 << 5) +#define m_INT_EDID_READY (1 << 2) + +#define HDMI_INTERRUPT_MASK2 0xc2 +#define HDMI_INTERRUPT_STATUS2 0xc3 +#define m_INT_HDCP_ERR (1 << 7) +#define m_INT_BKSV_FLAG (1 << 6) +#define m_INT_HDCP_OK (1 << 4) + +#define HDMI_STATUS 0xc8 +#define m_HOTPLUG (1 << 7) +#define m_MASK_INT_HOTPLUG (1 << 5) +#define m_INT_HOTPLUG (1 << 1) +#define v_MASK_INT_HOTPLUG(n) ((n & 0x1) << 5) + +#define HDMI_COLORBAR 0xc9 + +#define HDMI_PHY_SYNC 0xce +#define HDMI_PHY_SYS_CTL 0xe0 +#define m_TMDS_CLK_SOURCE (1 << 5) +#define v_TMDS_FROM_PLL (0 << 5) +#define v_TMDS_FROM_GEN (1 << 5) 
+#define m_PHASE_CLK (1 << 4) +#define v_DEFAULT_PHASE (0 << 4) +#define v_SYNC_PHASE (1 << 4) +#define m_TMDS_CURRENT_PWR (1 << 3) +#define v_TURN_ON_CURRENT (0 << 3) +#define v_CAT_OFF_CURRENT (1 << 3) +#define m_BANDGAP_PWR (1 << 2) +#define v_BANDGAP_PWR_UP (0 << 2) +#define v_BANDGAP_PWR_DOWN (1 << 2) +#define m_PLL_PWR (1 << 1) +#define v_PLL_PWR_UP (0 << 1) +#define v_PLL_PWR_DOWN (1 << 1) +#define m_TMDS_CHG_PWR (1 << 0) +#define v_TMDS_CHG_PWR_UP (0 << 0) +#define v_TMDS_CHG_PWR_DOWN (1 << 0) + +#define HDMI_PHY_CHG_PWR 0xe1 +#define v_CLK_CHG_PWR(n) ((n & 1) << 3) +#define v_DATA_CHG_PWR(n) ((n & 7) << 0) + +#define HDMI_PHY_DRIVER 0xe2 +#define v_CLK_MAIN_DRIVER(n) (n << 4) +#define v_DATA_MAIN_DRIVER(n) (n << 0) + +#define HDMI_PHY_PRE_EMPHASIS 0xe3 +#define v_PRE_EMPHASIS(n) ((n & 7) << 4) +#define v_CLK_PRE_DRIVER(n) ((n & 3) << 2) +#define v_DATA_PRE_DRIVER(n) ((n & 3) << 0) + +#define HDMI_PHY_FEEDBACK_DIV_RATIO_LOW 0xe7 +#define v_FEEDBACK_DIV_LOW(n) (n & 0xff) +#define HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH 0xe8 +#define v_FEEDBACK_DIV_HIGH(n) (n & 1) + +#define HDMI_PHY_PRE_DIV_RATIO 0xed +#define v_PRE_DIV_RATIO(n) (n & 0x1f) + +#define HDMI_CEC_CTRL 0xd0 +#define m_ADJUST_FOR_HISENSE (1 << 6) +#define m_REJECT_RX_BROADCAST (1 << 5) +#define m_BUSFREETIME_ENABLE (1 << 2) +#define m_REJECT_RX (1 << 1) +#define m_START_TX (1 << 0) + +#define HDMI_CEC_DATA 0xd1 +#define HDMI_CEC_TX_OFFSET 0xd2 +#define HDMI_CEC_RX_OFFSET 0xd3 +#define HDMI_CEC_CLK_H 0xd4 +#define HDMI_CEC_CLK_L 0xd5 +#define HDMI_CEC_TX_LENGTH 0xd6 +#define HDMI_CEC_RX_LENGTH 0xd7 +#define HDMI_CEC_TX_INT_MASK 0xd8 +#define m_TX_DONE (1 << 3) +#define m_TX_NOACK (1 << 2) +#define m_TX_BROADCAST_REJ (1 << 1) +#define m_TX_BUSNOTFREE (1 << 0) + +#define HDMI_CEC_RX_INT_MASK 0xd9 +#define m_RX_LA_ERR (1 << 4) +#define m_RX_GLITCH (1 << 3) +#define m_RX_DONE (1 << 0) + +#define HDMI_CEC_TX_INT 0xda +#define HDMI_CEC_RX_INT 0xdb +#define HDMI_CEC_BUSFREETIME_L 0xdc +#define HDMI_CEC_BUSFREETIME_H 0xdd +#define HDMI_CEC_LOGICADDR 0xde + +#endif /* __INNO_HDMI_H__ */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index a0d51ccb6ea4..896da09e49ee 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -384,36 +384,6 @@ static const struct dev_pm_ops rockchip_drm_pm_ops = { rockchip_drm_sys_resume) }; -/* - * @node: device tree node containing encoder input ports - * @encoder: drm_encoder - */ -int rockchip_drm_encoder_get_mux_id(struct device_node *node, - struct drm_encoder *encoder) -{ - struct device_node *ep; - struct drm_crtc *crtc = encoder->crtc; - struct of_endpoint endpoint; - struct device_node *port; - int ret; - - if (!node || !crtc) - return -EINVAL; - - for_each_endpoint_of_node(node, ep) { - port = of_graph_get_remote_port(ep); - of_node_put(port); - if (port == crtc->port) { - ret = of_graph_parse_endpoint(ep, &endpoint); - of_node_put(ep); - return ret ?: endpoint.id; - } - } - - return -EINVAL; -} -EXPORT_SYMBOL_GPL(rockchip_drm_encoder_get_mux_id); - static int compare_of(struct device *dev, void *data) { struct device_node *np = data; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index bb8b076f1dbb..3529f692edb8 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -67,8 +67,6 @@ void rockchip_drm_atomic_work(struct work_struct *work); int rockchip_register_crtc_funcs(struct 
drm_crtc *crtc, const struct rockchip_crtc_funcs *crtc_funcs); void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc); -int rockchip_drm_encoder_get_mux_id(struct device_node *node, - struct drm_encoder *encoder); int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type, int out_mode); int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index db0763794edc..27342fd76e90 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -438,26 +438,6 @@ static const struct drm_crtc_helper_funcs crtc_helper_funcs = { .mode_set_base = shmob_drm_crtc_mode_set_base, }; -void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc, - struct drm_file *file) -{ - struct drm_pending_vblank_event *event; - struct drm_device *dev = scrtc->crtc.dev; - unsigned long flags; - - /* Destroy the pending vertical blanking event associated with the - * pending page flip, if any, and disable vertical blanking interrupts. - */ - spin_lock_irqsave(&dev->event_lock, flags); - event = scrtc->event; - if (event && event->base.file_priv == file) { - scrtc->event = NULL; - event->base.destroy(&event->base); - drm_vblank_put(dev, 0); - } - spin_unlock_irqrestore(&dev->event_lock, flags); -} - void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc) { struct drm_pending_vblank_event *event; diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h index eddad6dcc88a..38ed4ff8aaf2 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h @@ -47,8 +47,6 @@ struct shmob_drm_connector { int shmob_drm_crtc_create(struct shmob_drm_device *sdev); void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable); -void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc, - struct drm_file *file); void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc); void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc); void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc); diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 04e66e3751b4..7700ff172079 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -200,13 +200,6 @@ done: return ret; } -static void shmob_drm_preclose(struct drm_device *dev, struct drm_file *file) -{ - struct shmob_drm_device *sdev = dev->dev_private; - - shmob_drm_crtc_cancel_page_flip(&sdev->crtc, file); -} - static irqreturn_t shmob_drm_irq(int irq, void *arg) { struct drm_device *dev = arg; @@ -266,7 +259,6 @@ static struct drm_driver shmob_drm_driver = { | DRIVER_PRIME, .load = shmob_drm_load, .unload = shmob_drm_unload, - .preclose = shmob_drm_preclose, .set_busid = drm_platform_set_busid, .irq_handler = shmob_drm_irq, .get_vblank_counter = drm_vblank_no_hw_counter, diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index f2afcf5438b8..24a3735b88fd 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -440,13 +440,6 @@ static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode) { } -static bool sti_tvout_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void sti_tvout_encoder_mode_set(struct drm_encoder *encoder, struct 
drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -486,7 +479,6 @@ static void sti_dvo_encoder_disable(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs sti_dvo_encoder_helper_funcs = { .dpms = sti_tvout_encoder_dpms, - .mode_fixup = sti_tvout_encoder_mode_fixup, .mode_set = sti_tvout_encoder_mode_set, .prepare = sti_tvout_encoder_prepare, .commit = sti_dvo_encoder_commit, @@ -540,7 +532,6 @@ static void sti_hda_encoder_disable(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs sti_hda_encoder_helper_funcs = { .dpms = sti_tvout_encoder_dpms, - .mode_fixup = sti_tvout_encoder_mode_fixup, .mode_set = sti_tvout_encoder_mode_set, .prepare = sti_tvout_encoder_prepare, .commit = sti_hda_encoder_commit, @@ -589,7 +580,6 @@ static void sti_hdmi_encoder_disable(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs sti_hdmi_encoder_helper_funcs = { .dpms = sti_tvout_encoder_dpms, - .mode_fixup = sti_tvout_encoder_mode_fixup, .mode_set = sti_tvout_encoder_mode_set, .prepare = sti_tvout_encoder_prepare, .commit = sti_hdmi_encoder_commit, diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index dde6f208c347..fb2b4b0271a2 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -988,23 +988,6 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc) spin_unlock_irqrestore(&drm->event_lock, flags); } -void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file) -{ - struct tegra_dc *dc = to_tegra_dc(crtc); - struct drm_device *drm = crtc->dev; - unsigned long flags; - - spin_lock_irqsave(&drm->event_lock, flags); - - if (dc->event && dc->event->base.file_priv == file) { - dc->event->base.destroy(&dc->event->base); - drm_crtc_vblank_put(crtc); - dc->event = NULL; - } - - spin_unlock_irqrestore(&drm->event_lock, flags); -} - static void tegra_dc_destroy(struct drm_crtc *crtc) { drm_crtc_cleanup(crtc); diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index c5c856a0879d..8e6b18caa706 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -858,10 +858,6 @@ static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file) { struct tegra_drm_file *fpriv = file->driver_priv; struct tegra_drm_context *context, *tmp; - struct drm_crtc *crtc; - - list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) - tegra_dc_cancel_page_flip(crtc, file); list_for_each_entry_safe(context, tmp, &fpriv->contexts, list) tegra_drm_context_free(context); diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index c088f2f67eda..8a10f5b7d9dc 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -195,7 +195,6 @@ struct tegra_dc_window { u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc); void tegra_dc_enable_vblank(struct tegra_dc *dc); void tegra_dc_disable_vblank(struct tegra_dc *dc); -void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file); void tegra_dc_commit(struct tegra_dc *dc); int tegra_dc_state_setup_clock(struct tegra_dc *dc, struct drm_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 7d07733bdc86..4802da8e6d6f 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -662,26 +662,6 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc) return IRQ_HANDLED; } -void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file) -{ - struct 
tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); - struct drm_pending_vblank_event *event; - struct drm_device *dev = crtc->dev; - unsigned long flags; - - /* Destroy the pending vertical blanking event associated with the - * pending page flip, if any, and disable vertical blanking interrupts. - */ - spin_lock_irqsave(&dev->event_lock, flags); - event = tilcdc_crtc->event; - if (event && event->base.file_priv == file) { - tilcdc_crtc->event = NULL; - event->base.destroy(&event->base); - drm_vblank_put(dev, 0); - } - spin_unlock_irqrestore(&dev->event_lock, flags); -} - struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev) { struct tilcdc_crtc *tilcdc_crtc; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index d7f5b897c6c5..8190ac3b1b32 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -350,13 +350,6 @@ fail_free_priv: return ret; } -static void tilcdc_preclose(struct drm_device *dev, struct drm_file *file) -{ - struct tilcdc_drm_private *priv = dev->dev_private; - - tilcdc_crtc_cancel_page_flip(priv->crtc, file); -} - static void tilcdc_lastclose(struct drm_device *dev) { struct tilcdc_drm_private *priv = dev->dev_private; @@ -557,7 +550,6 @@ static struct drm_driver tilcdc_driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET, .load = tilcdc_load, .unload = tilcdc_unload, - .preclose = tilcdc_preclose, .lastclose = tilcdc_lastclose, .set_busid = drm_platform_set_busid, .irq_handler = tilcdc_irq, diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h index e863ad0d26fe..66105d8dc620 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h @@ -163,7 +163,6 @@ struct tilcdc_panel_info { #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev); -void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file); irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc); void tilcdc_crtc_update_clk(struct drm_crtc *crtc); void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c index 4dda6e2f464b..8dcf02a79b23 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -70,14 +70,6 @@ static void panel_encoder_dpms(struct drm_encoder *encoder, int mode) mode == DRM_MODE_DPMS_ON ? 
1 : 0); } -static bool panel_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - /* nothing needed */ - return true; -} - static void panel_encoder_prepare(struct drm_encoder *encoder) { struct panel_encoder *panel_encoder = to_panel_encoder(encoder); @@ -103,7 +95,6 @@ static const struct drm_encoder_funcs panel_encoder_funcs = { static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = { .dpms = panel_encoder_dpms, - .mode_fixup = panel_encoder_mode_fixup, .prepare = panel_encoder_prepare, .commit = panel_encoder_commit, .mode_set = panel_encoder_mode_set, diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c index 5052a8af7ecb..1c230172b402 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c @@ -80,14 +80,6 @@ static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode) tfp410_encoder->dpms = mode; } -static bool tfp410_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - /* nothing needed */ - return true; -} - static void tfp410_encoder_prepare(struct drm_encoder *encoder) { tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); @@ -112,7 +104,6 @@ static const struct drm_encoder_funcs tfp410_encoder_funcs = { static const struct drm_encoder_helper_funcs tfp410_encoder_helper_funcs = { .dpms = tfp410_encoder_dpms, - .mode_fixup = tfp410_encoder_mode_fixup, .prepare = tfp410_encoder_prepare, .commit = tfp410_encoder_commit, .mode_set = tfp410_encoder_mode_set, diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index d5728ec85254..772ec9e1f590 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -125,17 +125,5 @@ static struct usb_driver udl_driver = { .disconnect = udl_usb_disconnect, .id_table = id_table, }; - -static int __init udl_init(void) -{ - return usb_register(&udl_driver); -} - -static void __exit udl_exit(void) -{ - usb_deregister(&udl_driver); -} - -module_init(udl_init); -module_exit(udl_exit); +module_usb_driver(udl_driver); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c index a181a647fcf9..59a4b34e87ed 100644 --- a/drivers/gpu/drm/udl/udl_encoder.c +++ b/drivers/gpu/drm/udl/udl_encoder.c @@ -26,13 +26,6 @@ static void udl_encoder_disable(struct drm_encoder *encoder) { } -static bool udl_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void udl_encoder_prepare(struct drm_encoder *encoder) { } @@ -54,7 +47,6 @@ udl_encoder_dpms(struct drm_encoder *encoder, int mode) static const struct drm_encoder_helper_funcs udl_helper_funcs = { .dpms = udl_encoder_dpms, - .mode_fixup = udl_mode_fixup, .prepare = udl_encoder_prepare, .mode_set = udl_encoder_mode_set, .commit = udl_encoder_commit, diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 200419d4d43c..c427499133d6 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -409,7 +409,6 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb, if (ufb->obj->base.import_attach) { ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf, - 0, ufb->obj->base.size, DMA_FROM_DEVICE); if (ret) goto unlock; @@ -425,7 +424,6 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb, if 
(ufb->obj->base.import_attach) { dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf, - 0, ufb->obj->base.size, DMA_FROM_DEVICE); } diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 018145e0b87d..619dc781c517 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -49,22 +49,27 @@ struct vc4_crtc { /* Which HVS channel we're using for our CRTC. */ int channel; - /* Pointer to the actual hardware display list memory for the - * crtc. - */ - u32 __iomem *dlist; - - u32 dlist_size; /* in dwords */ - struct drm_pending_vblank_event *event; }; +struct vc4_crtc_state { + struct drm_crtc_state base; + /* Dlist area for this CRTC configuration. */ + struct drm_mm_node mm; +}; + static inline struct vc4_crtc * to_vc4_crtc(struct drm_crtc *crtc) { return (struct vc4_crtc *)crtc; } +static inline struct vc4_crtc_state * +to_vc4_crtc_state(struct drm_crtc_state *crtc_state) +{ + return (struct vc4_crtc_state *)crtc_state; +} + struct vc4_crtc_data { /* Which channel of the HVS this pixelvalve sources from. */ int hvs_channel; @@ -319,11 +324,13 @@ static void vc4_crtc_enable(struct drm_crtc *crtc) static int vc4_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_plane *plane; - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + unsigned long flags; u32 dlist_count = 0; + int ret; /* The pixelvalve can only feed one encoder (and encoders are * 1:1 with connectors.) @@ -346,18 +353,12 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, dlist_count++; /* Account for SCALER_CTL0_END. */ - if (!vc4_crtc->dlist || dlist_count > vc4_crtc->dlist_size) { - vc4_crtc->dlist = ((u32 __iomem *)vc4->hvs->dlist + - HVS_BOOTLOADER_DLIST_END); - vc4_crtc->dlist_size = ((SCALER_DLIST_SIZE >> 2) - - HVS_BOOTLOADER_DLIST_END); - - if (dlist_count > vc4_crtc->dlist_size) { - DRM_DEBUG_KMS("dlist too large for CRTC (%d > %d).\n", - dlist_count, vc4_crtc->dlist_size); - return -EINVAL; - } - } + spin_lock_irqsave(&vc4->hvs->mm_lock, flags); + ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm, + dlist_count, 1, 0); + spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags); + if (ret) + return ret; return 0; } @@ -368,47 +369,29 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); struct drm_plane *plane; bool debug_dump_regs = false; - u32 __iomem *dlist_next = vc4_crtc->dlist; + u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start; + u32 __iomem *dlist_next = dlist_start; if (debug_dump_regs) { DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc)); vc4_hvs_dump_state(dev); } - /* Copy all the active planes' dlist contents to the hardware dlist. - * - * XXX: If the new display list was large enough that it - * overlapped a currently-read display list, we need to do - * something like disable scanout before putting in the new - * list. For now, we're safe because we only have the two - * planes. - */ + /* Copy all the active planes' dlist contents to the hardware dlist. 
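+	 * Each plane advances dlist_next by the number of dwords it wrote;
+	 * after the closing SCALER_CTL0_END the total must match the drm_mm
+	 * allocation made in vc4_crtc_atomic_check() (hence the WARN_ON_ONCE
+	 * below).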
*/ drm_atomic_crtc_for_each_plane(plane, crtc) { dlist_next += vc4_plane_write_dlist(plane, dlist_next); } - if (dlist_next == vc4_crtc->dlist) { - /* If no planes were enabled, use the SCALER_CTL0_END - * at the start of the display list memory (in the - * bootloader section). We'll rewrite that - * SCALER_CTL0_END, just in case, though. - */ - writel(SCALER_CTL0_END, vc4->hvs->dlist); - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 0); - } else { - writel(SCALER_CTL0_END, dlist_next); - dlist_next++; - - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), - (u32 __iomem *)vc4_crtc->dlist - - (u32 __iomem *)vc4->hvs->dlist); - - /* Make the next display list start after ours. */ - vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist); - vc4_crtc->dlist = dlist_next; - } + writel(SCALER_CTL0_END, dlist_next); + dlist_next++; + + WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); + + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + vc4_state->mm.start); if (debug_dump_regs) { DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); @@ -573,6 +556,36 @@ static int vc4_page_flip(struct drm_crtc *crtc, return drm_atomic_helper_page_flip(crtc, fb, event, flags); } +static struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct vc4_crtc_state *vc4_state; + + vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL); + if (!vc4_state) + return NULL; + + __drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base); + return &vc4_state->base; +} + +static void vc4_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct vc4_dev *vc4 = to_vc4_dev(crtc->dev); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); + + if (vc4_state->mm.allocated) { + unsigned long flags; + + spin_lock_irqsave(&vc4->hvs->mm_lock, flags); + drm_mm_remove_node(&vc4_state->mm); + spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags); + + } + + __drm_atomic_helper_crtc_destroy_state(crtc, state); +} + static const struct drm_crtc_funcs vc4_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = vc4_crtc_destroy, @@ -581,8 +594,8 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = { .cursor_set = NULL, /* handled by drm_mode_cursor_universal */ .cursor_move = NULL, /* handled by drm_mode_cursor_universal */ .reset = drm_atomic_helper_crtc_reset, - .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .atomic_duplicate_state = vc4_crtc_duplicate_state, + .atomic_destroy_state = vc4_crtc_destroy_state, }; static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { @@ -593,26 +606,6 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { .atomic_flush = vc4_crtc_atomic_flush, }; -/* Frees the page flip event when the DRM device is closed with the - * event still outstanding. 
- */ -void vc4_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file) -{ - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); - struct drm_device *dev = crtc->dev; - unsigned long flags; - - spin_lock_irqsave(&dev->event_lock, flags); - - if (vc4_crtc->event && vc4_crtc->event->base.file_priv == file) { - vc4_crtc->event->base.destroy(&vc4_crtc->event->base); - drm_crtc_vblank_put(crtc); - vc4_crtc->event = NULL; - } - - spin_unlock_irqrestore(&dev->event_lock, flags); -} - static const struct vc4_crtc_data pv0_data = { .hvs_channel = 0, .encoder0_type = VC4_ENCODER_TYPE_DSI0, @@ -664,9 +657,9 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) struct vc4_dev *vc4 = to_vc4_dev(drm); struct vc4_crtc *vc4_crtc; struct drm_crtc *crtc; - struct drm_plane *primary_plane, *cursor_plane; + struct drm_plane *primary_plane, *cursor_plane, *destroy_plane, *temp; const struct of_device_id *match; - int ret; + int ret, i; vc4_crtc = devm_kzalloc(dev, sizeof(*vc4_crtc), GFP_KERNEL); if (!vc4_crtc) @@ -695,27 +688,49 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) goto err; } - cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR); - if (IS_ERR(cursor_plane)) { - dev_err(dev, "failed to construct cursor plane\n"); - ret = PTR_ERR(cursor_plane); - goto err_primary; - } - - drm_crtc_init_with_planes(drm, crtc, primary_plane, cursor_plane, + drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, &vc4_crtc_funcs, NULL); drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs); primary_plane->crtc = crtc; - cursor_plane->crtc = crtc; vc4->crtc[drm_crtc_index(crtc)] = vc4_crtc; vc4_crtc->channel = vc4_crtc->data->hvs_channel; + /* Set up some arbitrary number of planes. We're not limited + * by a set number of physical registers, just the space in + * the HVS (16k) and how small an plane can be (28 bytes). + * However, each plane we set up takes up some memory, and + * increases the cost of looping over planes, which atomic + * modesetting does quite a bit. As a result, we pick a + * modest number of planes to expose, that should hopefully + * still cover any sane usecase. + */ + for (i = 0; i < 8; i++) { + struct drm_plane *plane = + vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY); + + if (IS_ERR(plane)) + continue; + + plane->possible_crtcs = 1 << drm_crtc_index(crtc); + } + + /* Set up the legacy cursor after overlay initialization, + * since we overlay planes on the CRTC in the order they were + * initialized. 
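+	 * Initializing the cursor last therefore keeps it composited on top
+	 * of the overlay planes.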
+ */ + cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR); + if (!IS_ERR(cursor_plane)) { + cursor_plane->possible_crtcs = 1 << drm_crtc_index(crtc); + cursor_plane->crtc = crtc; + crtc->cursor = cursor_plane; + } + CRTC_WRITE(PV_INTEN, 0); CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START); ret = devm_request_irq(dev, platform_get_irq(pdev, 0), vc4_crtc_irq_handler, 0, "vc4 crtc", vc4_crtc); if (ret) - goto err_cursor; + goto err_destroy_planes; vc4_set_crtc_possible_masks(drm, crtc); @@ -723,10 +738,12 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) return 0; -err_cursor: - cursor_plane->funcs->destroy(cursor_plane); -err_primary: - primary_plane->funcs->destroy(primary_plane); +err_destroy_planes: + list_for_each_entry_safe(destroy_plane, temp, + &drm->mode_config.plane_list, head) { + if (destroy_plane->possible_crtcs == 1 << drm_crtc_index(crtc)) + destroy_plane->funcs->destroy(destroy_plane); + } err: return ret; } diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index f1655fff8425..b7d2ff0e6e1f 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -43,14 +43,6 @@ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index) return map; } -static void vc4_drm_preclose(struct drm_device *dev, struct drm_file *file) -{ - struct drm_crtc *crtc; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) - vc4_cancel_page_flip(crtc, file); -} - static void vc4_lastclose(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); @@ -91,8 +83,6 @@ static struct drm_driver vc4_drm_driver = { DRIVER_HAVE_IRQ | DRIVER_PRIME), .lastclose = vc4_lastclose, - .preclose = vc4_drm_preclose, - .irq_handler = vc4_irq, .irq_preinstall = vc4_irq_preinstall, .irq_postinstall = vc4_irq_postinstall, diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 080865ec2bae..3d1df6b1c4d3 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -149,7 +149,17 @@ struct vc4_v3d { struct vc4_hvs { struct platform_device *pdev; void __iomem *regs; - void __iomem *dlist; + u32 __iomem *dlist; + + /* Memory manager for CRTCs to allocate space in the display + * list. Units are dwords. + */ + struct drm_mm dlist_mm; + /* Memory manager for the LBM memory used by HVS scaling. */ + struct drm_mm lbm_mm; + spinlock_t mm_lock; + + struct drm_mm_node mitchell_netravali_filter; }; struct vc4_plane { @@ -376,7 +386,6 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *arg); extern struct platform_driver vc4_crtc_driver; int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id); void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id); -void vc4_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file); int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg); /* vc4_debugfs.c */ diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 8098c5b21ba4..6fbab1c82cb1 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -100,12 +100,76 @@ int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused) } #endif +/* The filter kernel is composed of dwords each containing 3 9-bit + * signed integers packed next to each other. 
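+ * For example, VC4_PPF_FILTER_WORD(-2, 0, 2) packs as
+ * (0x1fe << 0) | (0x000 << 9) | (0x002 << 18) = 0x000801fe, each
+ * coefficient truncated to its low 9 bits (two's complement).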
+ */ +#define VC4_INT_TO_COEFF(coeff) (coeff & 0x1ff) +#define VC4_PPF_FILTER_WORD(c0, c1, c2) \ + ((((c0) & 0x1ff) << 0) | \ + (((c1) & 0x1ff) << 9) | \ + (((c2) & 0x1ff) << 18)) + +/* The whole filter kernel is arranged as the coefficients 0-16 going + * up, then a pad, then 17-31 going down and reversed within the + * dwords. This means that a linear phase kernel (where it's + * symmetrical at the boundary between 15 and 16) has the last 5 + * dwords matching the first 5, but reversed. + */ +#define VC4_LINEAR_PHASE_KERNEL(c0, c1, c2, c3, c4, c5, c6, c7, c8, \ + c9, c10, c11, c12, c13, c14, c15) \ + {VC4_PPF_FILTER_WORD(c0, c1, c2), \ + VC4_PPF_FILTER_WORD(c3, c4, c5), \ + VC4_PPF_FILTER_WORD(c6, c7, c8), \ + VC4_PPF_FILTER_WORD(c9, c10, c11), \ + VC4_PPF_FILTER_WORD(c12, c13, c14), \ + VC4_PPF_FILTER_WORD(c15, c15, 0)} + +#define VC4_LINEAR_PHASE_KERNEL_DWORDS 6 +#define VC4_KERNEL_DWORDS (VC4_LINEAR_PHASE_KERNEL_DWORDS * 2 - 1) + +/* Recommended B=1/3, C=1/3 filter choice from Mitchell/Netravali. + * http://www.cs.utexas.edu/~fussell/courses/cs384g/lectures/mitchell/Mitchell.pdf + */ +static const u32 mitchell_netravali_1_3_1_3_kernel[] = + VC4_LINEAR_PHASE_KERNEL(0, -2, -6, -8, -10, -8, -3, 2, 18, + 50, 82, 119, 155, 187, 213, 227); + +static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs, + struct drm_mm_node *space, + const u32 *kernel) +{ + int ret, i; + u32 __iomem *dst_kernel; + + ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS, 1, + 0); + if (ret) { + DRM_ERROR("Failed to allocate space for filter kernel: %d\n", + ret); + return ret; + } + + dst_kernel = hvs->dlist + space->start; + + for (i = 0; i < VC4_KERNEL_DWORDS; i++) { + if (i < VC4_LINEAR_PHASE_KERNEL_DWORDS) + writel(kernel[i], &dst_kernel[i]); + else { + writel(kernel[VC4_KERNEL_DWORDS - i - 1], + &dst_kernel[i]); + } + } + + return 0; +} + static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = dev_get_drvdata(master); struct vc4_dev *vc4 = drm->dev_private; struct vc4_hvs *hvs = NULL; + int ret; hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL); if (!hvs) @@ -119,6 +183,33 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) hvs->dlist = hvs->regs + SCALER_DLIST_START; + spin_lock_init(&hvs->mm_lock); + + /* Set up the HVS display list memory manager. We never + * overwrite the setup from the bootloader (just 128b out of + * our 16K), since we don't want to scramble the screen when + * transitioning from the firmware's boot setup to runtime. + */ + drm_mm_init(&hvs->dlist_mm, + HVS_BOOTLOADER_DLIST_END, + (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END); + + /* Set up the HVS LBM memory manager. We could have some more + * complicated data structure that allowed reuse of LBM areas + * between planes when they don't overlap on the screen, but + * for now we just allocate globally. + */ + drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024); + + /* Upload filter kernels. We only have the one for now, so we + * keep it around for the lifetime of the driver. 
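+	 * The node's start offset is what vc4_plane_mode_set() later programs
+	 * into SCALER_PPF_KERNEL_OFFSET whenever a plane scales with PPF.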
+ */ + ret = vc4_hvs_upload_linear_kernel(hvs, + &hvs->mitchell_netravali_filter, + mitchell_netravali_1_3_1_3_kernel); + if (ret) + return ret; + vc4->hvs = hvs; return 0; } @@ -129,6 +220,12 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master, struct drm_device *drm = dev_get_drvdata(master); struct vc4_dev *vc4 = drm->dev_private; + if (vc4->hvs->mitchell_netravali_filter.allocated) + drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter); + + drm_mm_takedown(&vc4->hvs->dlist_mm); + drm_mm_takedown(&vc4->hvs->lbm_mm); + vc4->hvs = NULL; } diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index f95f2df5f8d1..4718ae5176cc 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -49,6 +49,15 @@ vc4_atomic_complete_commit(struct vc4_commit *c) drm_atomic_helper_commit_modeset_enables(dev, state); + /* Make sure that drm_atomic_helper_wait_for_vblanks() + * actually waits for vblank. If we're doing a full atomic + * modeset (as opposed to a vc4_update_plane() short circuit), + * then we need to wait for scanout to be done with our + * display lists before we free it and potentially reallocate + * and overwrite the dlist memory with a new modeset. + */ + state->legacy_cursor_update = false; + drm_atomic_helper_wait_for_vblanks(dev, state); drm_atomic_helper_cleanup_planes(dev, state); diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 0addbad15832..7b0c72ae02a0 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -24,19 +24,52 @@ #include "drm_fb_cma_helper.h" #include "drm_plane_helper.h" +enum vc4_scaling_mode { + VC4_SCALING_NONE, + VC4_SCALING_TPZ, + VC4_SCALING_PPF, +}; + struct vc4_plane_state { struct drm_plane_state base; + /* System memory copy of the display list for this element, computed + * at atomic_check time. + */ u32 *dlist; - u32 dlist_size; /* Number of dwords in allocated for the display list */ + u32 dlist_size; /* Number of dwords allocated for the display list */ u32 dlist_count; /* Number of used dwords in the display list. */ - /* Offset in the dlist to pointer word 0. */ - u32 pw0_offset; + /* Offset in the dlist to various words, for pageflip or + * cursor updates. + */ + u32 pos0_offset; + u32 pos2_offset; + u32 ptr0_offset; /* Offset where the plane's dlist was last stored in the - hardware at vc4_crtc_atomic_flush() time. - */ - u32 *hw_dlist; + * hardware at vc4_crtc_atomic_flush() time. + */ + u32 __iomem *hw_dlist; + + /* Clipped coordinates of the plane on the display. */ + int crtc_x, crtc_y, crtc_w, crtc_h; + /* Clipped area being scanned from in the FB. */ + u32 src_x, src_y; + + u32 src_w[2], src_h[2]; + + /* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */ + enum vc4_scaling_mode x_scaling[2], y_scaling[2]; + bool is_unity; + bool is_yuv; + + /* Offset to start scanning out from the start of the plane's + * BO. + */ + u32 offsets[3]; + + /* Our allocation in LBM for temporary storage during scaling. 
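+	 * Sized by vc4_lbm_size() when the plane's display list is built and
+	 * released again in vc4_plane_destroy_state().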
*/ + struct drm_mm_node lbm; }; static inline struct vc4_plane_state * @@ -50,6 +83,7 @@ static const struct hvs_format { u32 hvs; /* HVS_FORMAT_* */ u32 pixel_order; bool has_alpha; + bool flip_cbcr; } hvs_formats[] = { { .drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888, @@ -59,6 +93,48 @@ static const struct hvs_format { .drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888, .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true, }, + { + .drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565, + .pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false, + }, + { + .drm = DRM_FORMAT_BGR565, .hvs = HVS_PIXEL_FORMAT_RGB565, + .pixel_order = HVS_PIXEL_ORDER_XBGR, .has_alpha = false, + }, + { + .drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551, + .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true, + }, + { + .drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551, + .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = false, + }, + { + .drm = DRM_FORMAT_YUV422, + .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE, + }, + { + .drm = DRM_FORMAT_YVU422, + .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE, + .flip_cbcr = true, + }, + { + .drm = DRM_FORMAT_YUV420, + .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE, + }, + { + .drm = DRM_FORMAT_YVU420, + .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE, + .flip_cbcr = true, + }, + { + .drm = DRM_FORMAT_NV12, + .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE, + }, + { + .drm = DRM_FORMAT_NV16, + .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE, + }, }; static const struct hvs_format *vc4_get_hvs_format(u32 drm_format) @@ -73,6 +149,16 @@ static const struct hvs_format *vc4_get_hvs_format(u32 drm_format) return NULL; } +static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst) +{ + if (dst > src) + return VC4_SCALING_PPF; + else if (dst < src) + return VC4_SCALING_TPZ; + else + return VC4_SCALING_NONE; +} + static bool plane_enabled(struct drm_plane_state *state) { return state->fb && state->crtc; @@ -89,6 +175,8 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane if (!vc4_state) return NULL; + memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm)); + __drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base); if (vc4_state->dlist) { @@ -108,8 +196,17 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane static void vc4_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { + struct vc4_dev *vc4 = to_vc4_dev(plane->dev); struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); + if (vc4_state->lbm.allocated) { + unsigned long irqflags; + + spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags); + drm_mm_remove_node(&vc4_state->lbm); + spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags); + } + kfree(vc4_state->dlist); __drm_atomic_helper_plane_destroy_state(plane, &vc4_state->base); kfree(state); @@ -148,84 +245,400 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val) vc4_state->dlist[vc4_state->dlist_count++] = val; } +/* Returns the scl0/scl1 field based on whether the dimensions need to + * be up/down/non-scaled. + * + * This is a replication of a table from the spec. 
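+ * For example, horizontal TPZ (downscaling) combined with vertical PPF
+ * (upscaling) selects SCALER_CTL0_SCL_H_TPZ_V_PPF.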
+ */ +static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane) +{ + struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); + + switch (vc4_state->x_scaling[plane] << 2 | vc4_state->y_scaling[plane]) { + case VC4_SCALING_PPF << 2 | VC4_SCALING_PPF: + return SCALER_CTL0_SCL_H_PPF_V_PPF; + case VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF: + return SCALER_CTL0_SCL_H_TPZ_V_PPF; + case VC4_SCALING_PPF << 2 | VC4_SCALING_TPZ: + return SCALER_CTL0_SCL_H_PPF_V_TPZ; + case VC4_SCALING_TPZ << 2 | VC4_SCALING_TPZ: + return SCALER_CTL0_SCL_H_TPZ_V_TPZ; + case VC4_SCALING_PPF << 2 | VC4_SCALING_NONE: + return SCALER_CTL0_SCL_H_PPF_V_NONE; + case VC4_SCALING_NONE << 2 | VC4_SCALING_PPF: + return SCALER_CTL0_SCL_H_NONE_V_PPF; + case VC4_SCALING_NONE << 2 | VC4_SCALING_TPZ: + return SCALER_CTL0_SCL_H_NONE_V_TPZ; + case VC4_SCALING_TPZ << 2 | VC4_SCALING_NONE: + return SCALER_CTL0_SCL_H_TPZ_V_NONE; + default: + case VC4_SCALING_NONE << 2 | VC4_SCALING_NONE: + /* The unity case is independently handled by + * SCALER_CTL0_UNITY. + */ + return 0; + } +} + +static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) +{ + struct drm_plane *plane = state->plane; + struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); + struct drm_framebuffer *fb = state->fb; + struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0); + u32 subpixel_src_mask = (1 << 16) - 1; + u32 format = fb->pixel_format; + int num_planes = drm_format_num_planes(format); + u32 h_subsample = 1; + u32 v_subsample = 1; + int i; + + for (i = 0; i < num_planes; i++) + vc4_state->offsets[i] = bo->paddr + fb->offsets[i]; + + /* We don't support subpixel source positioning for scaling. */ + if ((state->src_x & subpixel_src_mask) || + (state->src_y & subpixel_src_mask) || + (state->src_w & subpixel_src_mask) || + (state->src_h & subpixel_src_mask)) { + return -EINVAL; + } + + vc4_state->src_x = state->src_x >> 16; + vc4_state->src_y = state->src_y >> 16; + vc4_state->src_w[0] = state->src_w >> 16; + vc4_state->src_h[0] = state->src_h >> 16; + + vc4_state->crtc_x = state->crtc_x; + vc4_state->crtc_y = state->crtc_y; + vc4_state->crtc_w = state->crtc_w; + vc4_state->crtc_h = state->crtc_h; + + vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0], + vc4_state->crtc_w); + vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0], + vc4_state->crtc_h); + + if (num_planes > 1) { + vc4_state->is_yuv = true; + + h_subsample = drm_format_horz_chroma_subsampling(format); + v_subsample = drm_format_vert_chroma_subsampling(format); + vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample; + vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample; + + vc4_state->x_scaling[1] = + vc4_get_scaling_mode(vc4_state->src_w[1], + vc4_state->crtc_w); + vc4_state->y_scaling[1] = + vc4_get_scaling_mode(vc4_state->src_h[1], + vc4_state->crtc_h); + + /* YUV conversion requires that scaling be enabled, + * even on a plane that's otherwise 1:1. Choose TPZ + * for simplicity. 
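+		 * For example, a 1920-wide NV12 plane displayed 1:1 still runs
+		 * through the scaler: its 960-wide Cb/Cr plane is PPF-upscaled,
+		 * and the Y plane gets a unity-ratio TPZ pass here instead of
+		 * VC4_SCALING_NONE.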
+ */ + if (vc4_state->x_scaling[0] == VC4_SCALING_NONE) + vc4_state->x_scaling[0] = VC4_SCALING_TPZ; + if (vc4_state->y_scaling[0] == VC4_SCALING_NONE) + vc4_state->y_scaling[0] = VC4_SCALING_TPZ; + } + + vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && + vc4_state->y_scaling[0] == VC4_SCALING_NONE && + vc4_state->x_scaling[1] == VC4_SCALING_NONE && + vc4_state->y_scaling[1] == VC4_SCALING_NONE); + + /* No configuring scaling on the cursor plane, since it gets + non-vblank-synced updates, and scaling requires requires + LBM changes which have to be vblank-synced. + */ + if (plane->type == DRM_PLANE_TYPE_CURSOR && !vc4_state->is_unity) + return -EINVAL; + + /* Clamp the on-screen start x/y to 0. The hardware doesn't + * support negative y, and negative x wastes bandwidth. + */ + if (vc4_state->crtc_x < 0) { + for (i = 0; i < num_planes; i++) { + u32 cpp = drm_format_plane_cpp(fb->pixel_format, i); + u32 subs = ((i == 0) ? 1 : h_subsample); + + vc4_state->offsets[i] += (cpp * + (-vc4_state->crtc_x) / subs); + } + vc4_state->src_w[0] += vc4_state->crtc_x; + vc4_state->src_w[1] += vc4_state->crtc_x / h_subsample; + vc4_state->crtc_x = 0; + } + + if (vc4_state->crtc_y < 0) { + for (i = 0; i < num_planes; i++) { + u32 subs = ((i == 0) ? 1 : v_subsample); + + vc4_state->offsets[i] += (fb->pitches[i] * + (-vc4_state->crtc_y) / subs); + } + vc4_state->src_h[0] += vc4_state->crtc_y; + vc4_state->src_h[1] += vc4_state->crtc_y / v_subsample; + vc4_state->crtc_y = 0; + } + + return 0; +} + +static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst) +{ + u32 scale, recip; + + scale = (1 << 16) * src / dst; + + /* The specs note that while the reciprocal would be defined + * as (1<<32)/scale, ~0 is close enough. + */ + recip = ~0 / scale; + + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) | + VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE)); + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP)); +} + +static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst) +{ + u32 scale = (1 << 16) * src / dst; + + vc4_dlist_write(vc4_state, + SCALER_PPF_AGC | + VC4_SET_FIELD(scale, SCALER_PPF_SCALE) | + VC4_SET_FIELD(0, SCALER_PPF_IPHASE)); +} + +static u32 vc4_lbm_size(struct drm_plane_state *state) +{ + struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); + /* This is the worst case number. One of the two sizes will + * be used depending on the scaling configuration. + */ + u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w); + u32 lbm; + + if (!vc4_state->is_yuv) { + if (vc4_state->is_unity) + return 0; + else if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ) + lbm = pix_per_line * 8; + else { + /* In special cases, this multiplier might be 12. */ + lbm = pix_per_line * 16; + } + } else { + /* There are cases for this going down to a multiplier + * of 2, but according to the firmware source, the + * table in the docs is somewhat wrong. 
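+		 * As a worked example, a YUV plane that is 1920 pixels wide at
+		 * its widest (source or destination) reserves 1920 * 16 = 30720
+		 * LBM units here, which is already a multiple of 32.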
+ */ + lbm = pix_per_line * 16; + } + + lbm = roundup(lbm, 32); + + return lbm; +} + +static void vc4_write_scaling_parameters(struct drm_plane_state *state, + int channel) +{ + struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); + + /* Ch0 H-PPF Word 0: Scaling Parameters */ + if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) { + vc4_write_ppf(vc4_state, + vc4_state->src_w[channel], vc4_state->crtc_w); + } + + /* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */ + if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) { + vc4_write_ppf(vc4_state, + vc4_state->src_h[channel], vc4_state->crtc_h); + vc4_dlist_write(vc4_state, 0xc0c0c0c0); + } + + /* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */ + if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) { + vc4_write_tpz(vc4_state, + vc4_state->src_w[channel], vc4_state->crtc_w); + } + + /* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */ + if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) { + vc4_write_tpz(vc4_state, + vc4_state->src_h[channel], vc4_state->crtc_h); + vc4_dlist_write(vc4_state, 0xc0c0c0c0); + } +} + /* Writes out a full display list for an active plane to the plane's * private dlist state. */ static int vc4_plane_mode_set(struct drm_plane *plane, struct drm_plane_state *state) { + struct vc4_dev *vc4 = to_vc4_dev(plane->dev); struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); struct drm_framebuffer *fb = state->fb; - struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0); u32 ctl0_offset = vc4_state->dlist_count; const struct hvs_format *format = vc4_get_hvs_format(fb->pixel_format); - uint32_t offset = fb->offsets[0]; - int crtc_x = state->crtc_x; - int crtc_y = state->crtc_y; - int crtc_w = state->crtc_w; - int crtc_h = state->crtc_h; - - if (state->crtc_w << 16 != state->src_w || - state->crtc_h << 16 != state->src_h) { - /* We don't support scaling yet, which involves - * allocating the LBM memory for scaling temporary - * storage, and putting filter kernels in the HVS - * context. - */ - return -EINVAL; + int num_planes = drm_format_num_planes(format->drm); + u32 scl0, scl1; + u32 lbm_size; + unsigned long irqflags; + int ret, i; + + ret = vc4_plane_setup_clipping_and_scaling(state); + if (ret) + return ret; + + /* Allocate the LBM memory that the HVS will use for temporary + * storage due to our scaling/format conversion. + */ + lbm_size = vc4_lbm_size(state); + if (lbm_size) { + if (!vc4_state->lbm.allocated) { + spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags); + ret = drm_mm_insert_node(&vc4->hvs->lbm_mm, + &vc4_state->lbm, + lbm_size, 32, 0); + spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags); + } else { + WARN_ON_ONCE(lbm_size != vc4_state->lbm.size); + } } - if (crtc_x < 0) { - offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x; - crtc_w += crtc_x; - crtc_x = 0; - } + if (ret) + return ret; - if (crtc_y < 0) { - offset += fb->pitches[0] * -crtc_y; - crtc_h += crtc_y; - crtc_y = 0; + /* SCL1 is used for Cb/Cr scaling of planar formats. For RGB + * and 4:4:4, scl1 should be set to scl0 so both channels of + * the scaler do the same thing. For YUV, the Y plane needs + * to be put in channel 1 and Cb/Cr in channel 0, so we swap + * the scl fields here. 
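+	 * In other words, for planar formats scl0 ends up carrying the Cb/Cr
+	 * scaling mode and scl1 the Y scaling mode.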
+ */ + if (num_planes == 1) { + scl0 = vc4_get_scl_field(state, 1); + scl1 = scl0; + } else { + scl0 = vc4_get_scl_field(state, 1); + scl1 = vc4_get_scl_field(state, 0); } + /* Control word */ vc4_dlist_write(vc4_state, SCALER_CTL0_VALID | (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) | (format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) | - SCALER_CTL0_UNITY); + (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) | + VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) | + VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1)); /* Position Word 0: Image Positions and Alpha Value */ + vc4_state->pos0_offset = vc4_state->dlist_count; vc4_dlist_write(vc4_state, VC4_SET_FIELD(0xff, SCALER_POS0_FIXED_ALPHA) | - VC4_SET_FIELD(crtc_x, SCALER_POS0_START_X) | - VC4_SET_FIELD(crtc_y, SCALER_POS0_START_Y)); - - /* Position Word 1: Scaled Image Dimensions. - * Skipped due to SCALER_CTL0_UNITY scaling. - */ + VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) | + VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y)); + + /* Position Word 1: Scaled Image Dimensions. */ + if (!vc4_state->is_unity) { + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(vc4_state->crtc_w, + SCALER_POS1_SCL_WIDTH) | + VC4_SET_FIELD(vc4_state->crtc_h, + SCALER_POS1_SCL_HEIGHT)); + } /* Position Word 2: Source Image Size, Alpha Mode */ + vc4_state->pos2_offset = vc4_state->dlist_count; vc4_dlist_write(vc4_state, VC4_SET_FIELD(format->has_alpha ? SCALER_POS2_ALPHA_MODE_PIPELINE : SCALER_POS2_ALPHA_MODE_FIXED, SCALER_POS2_ALPHA_MODE) | - VC4_SET_FIELD(crtc_w, SCALER_POS2_WIDTH) | - VC4_SET_FIELD(crtc_h, SCALER_POS2_HEIGHT)); + VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) | + VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT)); /* Position Word 3: Context. Written by the HVS. */ vc4_dlist_write(vc4_state, 0xc0c0c0c0); - vc4_state->pw0_offset = vc4_state->dlist_count; - /* Pointer Word 0: RGB / Y Pointer */ - vc4_dlist_write(vc4_state, bo->paddr + offset); + /* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers + * + * The pointers may be any byte address. + */ + vc4_state->ptr0_offset = vc4_state->dlist_count; + if (!format->flip_cbcr) { + for (i = 0; i < num_planes; i++) + vc4_dlist_write(vc4_state, vc4_state->offsets[i]); + } else { + WARN_ON_ONCE(num_planes != 3); + vc4_dlist_write(vc4_state, vc4_state->offsets[0]); + vc4_dlist_write(vc4_state, vc4_state->offsets[2]); + vc4_dlist_write(vc4_state, vc4_state->offsets[1]); + } - /* Pointer Context Word 0: Written by the HVS */ - vc4_dlist_write(vc4_state, 0xc0c0c0c0); + /* Pointer Context Word 0/1/2: Written by the HVS */ + for (i = 0; i < num_planes; i++) + vc4_dlist_write(vc4_state, 0xc0c0c0c0); - /* Pitch word 0: Pointer 0 Pitch */ - vc4_dlist_write(vc4_state, - VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH)); + /* Pitch word 0/1/2 */ + for (i = 0; i < num_planes; i++) { + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH)); + } + + /* Colorspace conversion words */ + if (vc4_state->is_yuv) { + vc4_dlist_write(vc4_state, SCALER_CSC0_ITR_R_601_5); + vc4_dlist_write(vc4_state, SCALER_CSC1_ITR_R_601_5); + vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); + } + + if (!vc4_state->is_unity) { + /* LBM Base Address. */ + if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || + vc4_state->y_scaling[1] != VC4_SCALING_NONE) { + vc4_dlist_write(vc4_state, vc4_state->lbm.start); + } + + if (num_planes > 1) { + /* Emit Cb/Cr as channel 0 and Y as channel + * 1. This matches how we set up scl0/scl1 + * above. 
+ */ + vc4_write_scaling_parameters(state, 1); + } + vc4_write_scaling_parameters(state, 0); + + /* If any PPF setup was done, then all the kernel + * pointers get uploaded. + */ + if (vc4_state->x_scaling[0] == VC4_SCALING_PPF || + vc4_state->y_scaling[0] == VC4_SCALING_PPF || + vc4_state->x_scaling[1] == VC4_SCALING_PPF || + vc4_state->y_scaling[1] == VC4_SCALING_PPF) { + u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start, + SCALER_PPF_KERNEL_OFFSET); + + /* HPPF plane 0 */ + vc4_dlist_write(vc4_state, kernel); + /* VPPF plane 0 */ + vc4_dlist_write(vc4_state, kernel); + /* HPPF plane 1 */ + vc4_dlist_write(vc4_state, kernel); + /* VPPF plane 1 */ + vc4_dlist_write(vc4_state, kernel); + } + } vc4_state->dlist[ctl0_offset] |= VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE); @@ -303,13 +716,13 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb) * scanout will start from this address as soon as the FIFO * needs to refill with pixels. */ - writel(addr, &vc4_state->hw_dlist[vc4_state->pw0_offset]); + writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]); /* Also update the CPU-side dlist copy, so that any later * atomic updates that don't do a new modeset on our plane * also use our updated address. */ - vc4_state->dlist[vc4_state->pw0_offset] = addr; + vc4_state->dlist[vc4_state->ptr0_offset] = addr; } static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = { @@ -325,8 +738,83 @@ static void vc4_plane_destroy(struct drm_plane *plane) drm_plane_cleanup(plane); } +/* Implements immediate (non-vblank-synced) updates of the cursor + * position, or falls back to the atomic helper otherwise. + */ +static int +vc4_update_plane(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_plane_state *plane_state; + struct vc4_plane_state *vc4_state; + + if (plane != crtc->cursor) + goto out; + + plane_state = plane->state; + vc4_state = to_vc4_plane_state(plane_state); + + if (!plane_state) + goto out; + + /* If we're changing the cursor contents, do that in the + * normal vblank-synced atomic path. + */ + if (fb != plane_state->fb) + goto out; + + /* No configuring new scaling in the fast path. */ + if (crtc_w != plane_state->crtc_w || + crtc_h != plane_state->crtc_h || + src_w != plane_state->src_w || + src_h != plane_state->src_h) { + goto out; + } + + /* Set the cursor's position on the screen. This is the + * expected change from the drm_mode_cursor_universal() + * helper. + */ + plane_state->crtc_x = crtc_x; + plane_state->crtc_y = crtc_y; + + /* Allow changing the start position within the cursor BO, if + * that matters. + */ + plane_state->src_x = src_x; + plane_state->src_y = src_y; + + /* Update the display list based on the new crtc_x/y. */ + vc4_plane_atomic_check(plane, plane_state); + + /* Note that we can't just call vc4_plane_write_dlist() + * because that would smash the context data that the HVS is + * currently using. 
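+	 * The context words (filled with 0xc0c0c0c0 at mode set time) are
+	 * rewritten by the HVS while it scans out, so only the pos0, pos2 and
+	 * ptr0 words are patched individually below.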
+ */ + writel(vc4_state->dlist[vc4_state->pos0_offset], + &vc4_state->hw_dlist[vc4_state->pos0_offset]); + writel(vc4_state->dlist[vc4_state->pos2_offset], + &vc4_state->hw_dlist[vc4_state->pos2_offset]); + writel(vc4_state->dlist[vc4_state->ptr0_offset], + &vc4_state->hw_dlist[vc4_state->ptr0_offset]); + + return 0; + +out: + return drm_atomic_helper_update_plane(plane, crtc, fb, + crtc_x, crtc_y, + crtc_w, crtc_h, + src_x, src_y, + src_w, src_h); +} + static const struct drm_plane_funcs vc4_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, + .update_plane = vc4_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = vc4_plane_destroy, .set_property = NULL, @@ -341,6 +829,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, struct drm_plane *plane = NULL; struct vc4_plane *vc4_plane; u32 formats[ARRAY_SIZE(hvs_formats)]; + u32 num_formats = 0; int ret = 0; unsigned i; @@ -351,12 +840,20 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, goto fail; } - for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) - formats[i] = hvs_formats[i].drm; + for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) { + /* Don't allow YUV in cursor planes, since that means + * tuning on the scaler, which we don't allow for the + * cursor. + */ + if (type != DRM_PLANE_TYPE_CURSOR || + hvs_formats[i].hvs < HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE) { + formats[num_formats++] = hvs_formats[i].drm; + } + } plane = &vc4_plane->base; ret = drm_universal_plane_init(dev, plane, 0xff, &vc4_plane_funcs, - formats, ARRAY_SIZE(formats), + formats, num_formats, type, NULL); drm_plane_helper_add(plane, &vc4_plane_helper_funcs); diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 4e52a0a88551..25df20ef939c 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -503,7 +503,12 @@ enum hvs_pixel_format { HVS_PIXEL_FORMAT_RGB888 = 5, HVS_PIXEL_FORMAT_RGBA6666 = 6, /* 32bpp */ - HVS_PIXEL_FORMAT_RGBA8888 = 7 + HVS_PIXEL_FORMAT_RGBA8888 = 7, + + HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE = 8, + HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE = 9, + HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE = 10, + HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE = 11, }; /* Note: the LSB is the rightmost character shown. Only valid for @@ -536,6 +541,21 @@ enum hvs_pixel_format { #define SCALER_CTL0_ORDER_MASK VC4_MASK(14, 13) #define SCALER_CTL0_ORDER_SHIFT 13 +#define SCALER_CTL0_SCL1_MASK VC4_MASK(10, 8) +#define SCALER_CTL0_SCL1_SHIFT 8 + +#define SCALER_CTL0_SCL0_MASK VC4_MASK(7, 5) +#define SCALER_CTL0_SCL0_SHIFT 5 + +#define SCALER_CTL0_SCL_H_PPF_V_PPF 0 +#define SCALER_CTL0_SCL_H_TPZ_V_PPF 1 +#define SCALER_CTL0_SCL_H_PPF_V_TPZ 2 +#define SCALER_CTL0_SCL_H_TPZ_V_TPZ 3 +#define SCALER_CTL0_SCL_H_PPF_V_NONE 4 +#define SCALER_CTL0_SCL_H_NONE_V_PPF 5 +#define SCALER_CTL0_SCL_H_NONE_V_TPZ 6 +#define SCALER_CTL0_SCL_H_TPZ_V_NONE 7 + /* Set to indicate no scaling. 
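All of the SCALER_* fields defined around here follow the same *_MASK/*_SHIFT naming convention consumed by VC4_SET_FIELD() in the plane code above. The macros live elsewhere in vc4_regs.h; they look roughly like this (reconstructed from memory, so treat the exact form as an approximation):

#define VC4_MASK(high, low)     ((u32)GENMASK(high, low))

/* Shift a value into its field and warn if it does not fit. */
#define VC4_SET_FIELD(value, field)                             \
        ({                                                      \
                u32 fieldval = (value) << field##_SHIFT;        \
                WARN_ON((fieldval & ~field##_MASK) != 0);       \
                fieldval & field##_MASK;                        \
        })

/* e.g.: VC4_SET_FIELD(SCALER_CTL0_SCL_H_PPF_V_PPF, SCALER_CTL0_SCL0) */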
*/ #define SCALER_CTL0_UNITY BIT(4) @@ -551,6 +571,12 @@ enum hvs_pixel_format { #define SCALER_POS0_START_X_MASK VC4_MASK(11, 0) #define SCALER_POS0_START_X_SHIFT 0 +#define SCALER_POS1_SCL_HEIGHT_MASK VC4_MASK(27, 16) +#define SCALER_POS1_SCL_HEIGHT_SHIFT 16 + +#define SCALER_POS1_SCL_WIDTH_MASK VC4_MASK(11, 0) +#define SCALER_POS1_SCL_WIDTH_SHIFT 0 + #define SCALER_POS2_ALPHA_MODE_MASK VC4_MASK(31, 30) #define SCALER_POS2_ALPHA_MODE_SHIFT 30 #define SCALER_POS2_ALPHA_MODE_PIPELINE 0 @@ -564,6 +590,80 @@ enum hvs_pixel_format { #define SCALER_POS2_WIDTH_MASK VC4_MASK(11, 0) #define SCALER_POS2_WIDTH_SHIFT 0 +/* Color Space Conversion words. Some values are S2.8 signed + * integers, except that the 2 integer bits map as {0x0: 0, 0x1: 1, + * 0x2: 2, 0x3: -1} + */ +/* bottom 8 bits of S2.8 contribution of Cr to Blue */ +#define SCALER_CSC0_COEF_CR_BLU_MASK VC4_MASK(31, 24) +#define SCALER_CSC0_COEF_CR_BLU_SHIFT 24 +/* Signed offset to apply to Y before CSC. (Y' = Y + YY_OFS) */ +#define SCALER_CSC0_COEF_YY_OFS_MASK VC4_MASK(23, 16) +#define SCALER_CSC0_COEF_YY_OFS_SHIFT 16 +/* Signed offset to apply to CB before CSC (Cb' = Cb - 128 + CB_OFS). */ +#define SCALER_CSC0_COEF_CB_OFS_MASK VC4_MASK(15, 8) +#define SCALER_CSC0_COEF_CB_OFS_SHIFT 8 +/* Signed offset to apply to CB before CSC (Cr' = Cr - 128 + CR_OFS). */ +#define SCALER_CSC0_COEF_CR_OFS_MASK VC4_MASK(7, 0) +#define SCALER_CSC0_COEF_CR_OFS_SHIFT 0 +#define SCALER_CSC0_ITR_R_601_5 0x00f00000 +#define SCALER_CSC0_ITR_R_709_3 0x00f00000 +#define SCALER_CSC0_JPEG_JFIF 0x00000000 + +/* S2.8 contribution of Cb to Green */ +#define SCALER_CSC1_COEF_CB_GRN_MASK VC4_MASK(31, 22) +#define SCALER_CSC1_COEF_CB_GRN_SHIFT 22 +/* S2.8 contribution of Cr to Green */ +#define SCALER_CSC1_COEF_CR_GRN_MASK VC4_MASK(21, 12) +#define SCALER_CSC1_COEF_CR_GRN_SHIFT 12 +/* S2.8 contribution of Y to all of RGB */ +#define SCALER_CSC1_COEF_YY_ALL_MASK VC4_MASK(11, 2) +#define SCALER_CSC1_COEF_YY_ALL_SHIFT 2 +/* top 2 bits of S2.8 contribution of Cr to Blue */ +#define SCALER_CSC1_COEF_CR_BLU_MASK VC4_MASK(1, 0) +#define SCALER_CSC1_COEF_CR_BLU_SHIFT 0 +#define SCALER_CSC1_ITR_R_601_5 0xe73304a8 +#define SCALER_CSC1_ITR_R_709_3 0xf2b784a8 +#define SCALER_CSC1_JPEG_JFIF 0xea34a400 + +/* S2.8 contribution of Cb to Red */ +#define SCALER_CSC2_COEF_CB_RED_MASK VC4_MASK(29, 20) +#define SCALER_CSC2_COEF_CB_RED_SHIFT 20 +/* S2.8 contribution of Cr to Red */ +#define SCALER_CSC2_COEF_CR_RED_MASK VC4_MASK(19, 10) +#define SCALER_CSC2_COEF_CR_RED_SHIFT 10 +/* S2.8 contribution of Cb to Blue */ +#define SCALER_CSC2_COEF_CB_BLU_MASK VC4_MASK(19, 10) +#define SCALER_CSC2_COEF_CB_BLU_SHIFT 10 +#define SCALER_CSC2_ITR_R_601_5 0x00066204 +#define SCALER_CSC2_ITR_R_709_3 0x00072a1c +#define SCALER_CSC2_JPEG_JFIF 0x000599c5 + +#define SCALER_TPZ0_VERT_RECALC BIT(31) +#define SCALER_TPZ0_SCALE_MASK VC4_MASK(28, 8) +#define SCALER_TPZ0_SCALE_SHIFT 8 +#define SCALER_TPZ0_IPHASE_MASK VC4_MASK(7, 0) +#define SCALER_TPZ0_IPHASE_SHIFT 0 +#define SCALER_TPZ1_RECIP_MASK VC4_MASK(15, 0) +#define SCALER_TPZ1_RECIP_SHIFT 0 + +/* Skips interpolating coefficients to 64 phases, so just 8 are used. + * Required for nearest neighbor. + */ +#define SCALER_PPF_NOINTERP BIT(31) +/* Replaes the highest valued coefficient with one that makes all 4 + * sum to unity. 
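The CSC comment above describes a 10-bit "S2.8" coefficient whose two integer bits map as {0, 1, 2, -1}, i.e. plain two's complement. A quick illustrative encoder, not part of the patch (the driver hard-codes the ITU-R BT.601/709 and JPEG/JFIF tables instead):

/* Pack a signed coefficient given in 1/256ths into the S2.8 field:
 * 256 -> 1.0 (integer bits 0x1), -256 -> -1.0 (0x3), -128 -> -0.5.
 */
static u32 hvs_csc_s2_8(int coef_256ths)
{
        return (u32)coef_256ths & 0x3ff;        /* two's complement, 10 bits */
}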
+ */ +#define SCALER_PPF_AGC BIT(30) +#define SCALER_PPF_SCALE_MASK VC4_MASK(24, 8) +#define SCALER_PPF_SCALE_SHIFT 8 +#define SCALER_PPF_IPHASE_MASK VC4_MASK(6, 0) +#define SCALER_PPF_IPHASE_SHIFT 0 + +#define SCALER_PPF_KERNEL_OFFSET_MASK VC4_MASK(13, 0) +#define SCALER_PPF_KERNEL_OFFSET_SHIFT 0 +#define SCALER_PPF_KERNEL_UNCACHED BIT(31) + #define SCALER_SRC_PITCH_MASK VC4_MASK(15, 0) #define SCALER_SRC_PITCH_SHIFT 0 diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index a165f03eaa79..429aa311685a 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -282,13 +282,6 @@ static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { .atomic_check = virtio_gpu_crtc_atomic_check, }; -static bool virtio_gpu_enc_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -362,7 +355,6 @@ virtio_gpu_best_encoder(struct drm_connector *connector) } static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = { - .mode_fixup = virtio_gpu_enc_mode_fixup, .mode_set = virtio_gpu_enc_mode_set, .enable = virtio_gpu_enc_enable, .disable = virtio_gpu_enc_disable, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index b40ed6061f05..7f898cfdc746 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -118,7 +118,7 @@ static const struct file_operations virtio_gpu_driver_fops = { static struct drm_driver driver = { - .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER, + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC, .set_busid = drm_virtio_set_busid, .load = virtio_gpu_driver_load, .unload = virtio_gpu_driver_unload, diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 572fb351feab..70b44a2345ab 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -68,10 +68,17 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, struct virtio_gpu_object *bo; uint32_t handle; - if (plane->fb) { - vgfb = to_virtio_gpu_framebuffer(plane->fb); + if (plane->state->fb) { + vgfb = to_virtio_gpu_framebuffer(plane->state->fb); bo = gem_to_virtio_gpu_obj(vgfb->obj); handle = bo->hw_res_handle; + if (bo->dumb) { + virtio_gpu_cmd_transfer_to_host_2d + (vgdev, handle, 0, + cpu_to_le32(plane->state->crtc_w), + cpu_to_le32(plane->state->crtc_h), + plane->state->crtc_x, plane->state->crtc_y, NULL); + } } else { handle = 0; } @@ -84,6 +91,11 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, plane->state->crtc_h, plane->state->crtc_x, plane->state->crtc_y); + virtio_gpu_cmd_resource_flush(vgdev, handle, + plane->state->crtc_x, + plane->state->crtc_y, + plane->state->crtc_w, + plane->state->crtc_h); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 24fb348a44e1..0ee76e523a90 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -972,15 +972,6 @@ static int vmw_driver_unload(struct drm_device *dev) return 0; } -static void vmw_preclose(struct drm_device *dev, - struct drm_file *file_priv) -{ - struct vmw_fpriv *vmw_fp = 
vmw_fpriv(file_priv); - struct vmw_private *dev_priv = vmw_priv(dev); - - vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events); -} - static void vmw_postclose(struct drm_device *dev, struct drm_file *file_priv) { @@ -1011,7 +1002,6 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) if (unlikely(vmw_fp == NULL)) return ret; - INIT_LIST_HEAD(&vmw_fp->fence_events); vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); if (unlikely(vmw_fp->tfile == NULL)) goto out_no_tfile; @@ -1501,7 +1491,6 @@ static struct drm_driver driver = { .master_set = vmw_master_set, .master_drop = vmw_master_drop, .open = vmw_driver_open, - .preclose = vmw_preclose, .postclose = vmw_postclose, .set_busid = drm_pci_set_busid, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 469cdd520615..5cb1b1687cd4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -80,7 +80,6 @@ struct vmw_fpriv { struct drm_master *locked_master; struct ttm_object_file *tfile; - struct list_head fence_events; bool gb_aware; }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 8e689b439890..e959df6ede83 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -71,7 +71,6 @@ struct vmw_user_fence { */ struct vmw_event_fence_action { struct vmw_fence_action action; - struct list_head fpriv_head; struct drm_pending_event *event; struct vmw_fence_obj *fence; @@ -808,44 +807,6 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data, } /** - * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects - * - * @fman: Pointer to a struct vmw_fence_manager - * @event_list: Pointer to linked list of struct vmw_event_fence_action objects - * with pointers to a struct drm_file object about to be closed. - * - * This function removes all pending fence events with references to a - * specific struct drm_file object about to be closed. The caller is required - * to pass a list of all struct vmw_event_fence_action objects with such - * events attached. This function is typically called before the - * struct drm_file object's event management is taken down. 
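The hunks that follow can delete all of this per-file bookkeeping because the DRM core now owns pending-event accounting and cleanup; a driver only reserves, fills and sends. A condensed sketch of that lifecycle: the struct my_pending wrapper and the event type are made up, while the drm_* helpers are the ones used in the hunks below.

struct my_pending {
        struct drm_pending_event base;
        struct drm_event event;         /* payload copied to userspace */
};

static int queue_my_event(struct drm_device *dev, struct drm_file *file_priv)
{
        struct my_pending *p = kzalloc(sizeof(*p), GFP_KERNEL);
        int ret;

        if (!p)
                return -ENOMEM;

        p->event.type = 0x80000042;     /* hypothetical event type */
        p->event.length = sizeof(p->event);

        /* charges file_priv->event_space and binds the event to the file */
        ret = drm_event_reserve_init(dev, file_priv, &p->base, &p->event);
        if (ret) {
                kfree(p);
                return ret;
        }

        /* later, typically with dev->event_lock held when it fires: */
        drm_send_event_locked(dev, &p->base);

        /* or, if the action is abandoned before it fires:
         * drm_event_cancel_free(dev, &p->base);
         */
        return 0;
}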
- */ -void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman, - struct list_head *event_list) -{ - struct vmw_event_fence_action *eaction; - struct drm_pending_event *event; - unsigned long irq_flags; - - while (1) { - spin_lock_irqsave(&fman->lock, irq_flags); - if (list_empty(event_list)) - goto out_unlock; - eaction = list_first_entry(event_list, - struct vmw_event_fence_action, - fpriv_head); - list_del_init(&eaction->fpriv_head); - event = eaction->event; - eaction->event = NULL; - spin_unlock_irqrestore(&fman->lock, irq_flags); - event->destroy(event); - } -out_unlock: - spin_unlock_irqrestore(&fman->lock, irq_flags); -} - - -/** * vmw_event_fence_action_seq_passed * * @action: The struct vmw_fence_action embedded in a struct @@ -879,10 +840,8 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action) *eaction->tv_usec = tv.tv_usec; } - list_del_init(&eaction->fpriv_head); - list_add_tail(&eaction->event->link, &file_priv->event_list); + drm_send_event_locked(dev, eaction->event); eaction->event = NULL; - wake_up_all(&file_priv->event_wait); spin_unlock_irqrestore(&dev->event_lock, irq_flags); } @@ -899,12 +858,6 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action) { struct vmw_event_fence_action *eaction = container_of(action, struct vmw_event_fence_action, action); - struct vmw_fence_manager *fman = fman_from_fence(eaction->fence); - unsigned long irq_flags; - - spin_lock_irqsave(&fman->lock, irq_flags); - list_del(&eaction->fpriv_head); - spin_unlock_irqrestore(&fman->lock, irq_flags); vmw_fence_obj_unreference(&eaction->fence); kfree(eaction); @@ -984,8 +937,6 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv, { struct vmw_event_fence_action *eaction; struct vmw_fence_manager *fman = fman_from_fence(fence); - struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); - unsigned long irq_flags; eaction = kzalloc(sizeof(*eaction), GFP_KERNEL); if (unlikely(eaction == NULL)) @@ -1002,10 +953,6 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv, eaction->tv_sec = tv_sec; eaction->tv_usec = tv_usec; - spin_lock_irqsave(&fman->lock, irq_flags); - list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events); - spin_unlock_irqrestore(&fman->lock, irq_flags); - vmw_fence_obj_add_action(fence, &eaction->action); return 0; @@ -1025,38 +972,26 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv, struct vmw_event_fence_pending *event; struct vmw_fence_manager *fman = fman_from_fence(fence); struct drm_device *dev = fman->dev_priv->dev; - unsigned long irq_flags; int ret; - spin_lock_irqsave(&dev->event_lock, irq_flags); - - ret = (file_priv->event_space < sizeof(event->event)) ? 
-EBUSY : 0; - if (likely(ret == 0)) - file_priv->event_space -= sizeof(event->event); - - spin_unlock_irqrestore(&dev->event_lock, irq_flags); - - if (unlikely(ret != 0)) { - DRM_ERROR("Failed to allocate event space for this file.\n"); - goto out_no_space; - } - - event = kzalloc(sizeof(*event), GFP_KERNEL); if (unlikely(event == NULL)) { DRM_ERROR("Failed to allocate an event.\n"); ret = -ENOMEM; - goto out_no_event; + goto out_no_space; } event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED; event->event.base.length = sizeof(*event); event->event.user_data = user_data; - event->base.event = &event->event.base; - event->base.file_priv = file_priv; - event->base.destroy = (void (*) (struct drm_pending_event *)) kfree; + ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to allocate event space for this file.\n"); + kfree(event); + goto out_no_space; + } if (flags & DRM_VMW_FE_FLAG_REQ_TIME) ret = vmw_event_fence_action_queue(file_priv, fence, @@ -1076,11 +1011,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv, return 0; out_no_queue: - event->base.destroy(&event->base); -out_no_event: - spin_lock_irqsave(&dev->event_lock, irq_flags); - file_priv->event_space += sizeof(*event); - spin_unlock_irqrestore(&dev->event_lock, irq_flags); + drm_event_cancel_free(dev, &event->base); out_no_space: return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h index 8be6c29f5eb5..83ae301ee141 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h @@ -116,8 +116,6 @@ extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman, - struct list_head *event_list); extern int vmw_event_fence_action_queue(struct drm_file *filee_priv, struct vmw_fence_obj *fence, struct drm_pending_event *event, diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c index d3ad5347342c..2f29780e7c68 100644 --- a/drivers/gpu/ipu-v3/ipu-dc.c +++ b/drivers/gpu/ipu-v3/ipu-dc.c @@ -171,6 +171,7 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced, u32 bus_format, u32 width) { struct ipu_dc_priv *priv = dc->priv; + int addr, sync; u32 reg = 0; int map; @@ -182,41 +183,39 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced, return map; } - if (interlaced) { - int addr; - - if (dc->di) - addr = 1; - else - addr = 0; + /* + * In interlaced mode we need more counters to create the asymmetric + * per-field VSYNC signals. The pixel active signal synchronising DC + * to DI moves to signal generator #6 (see ipu-di.c). In progressive + * mode counter #5 is used. + */ + sync = interlaced ? 
6 : 5; + + /* Reserve 5 microcode template words for each DI */ + if (dc->di) + addr = 5; + else + addr = 0; + if (interlaced) { dc_link_event(dc, DC_EVT_NL, addr, 3); dc_link_event(dc, DC_EVT_EOL, addr, 2); dc_link_event(dc, DC_EVT_NEW_DATA, addr, 1); /* Init template microcode */ - dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, 6, 1); + dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1); } else { - if (dc->di) { - dc_link_event(dc, DC_EVT_NL, 2, 3); - dc_link_event(dc, DC_EVT_EOL, 3, 2); - dc_link_event(dc, DC_EVT_NEW_DATA, 1, 1); - /* Init template microcode */ - dc_write_tmpl(dc, 2, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1); - dc_write_tmpl(dc, 3, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0); - dc_write_tmpl(dc, 4, WRG, 0, map, NULL_WAVE, 0, 0, 1); - dc_write_tmpl(dc, 1, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1); - } else { - dc_link_event(dc, DC_EVT_NL, 5, 3); - dc_link_event(dc, DC_EVT_EOL, 6, 2); - dc_link_event(dc, DC_EVT_NEW_DATA, 8, 1); - /* Init template microcode */ - dc_write_tmpl(dc, 5, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1); - dc_write_tmpl(dc, 6, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0); - dc_write_tmpl(dc, 7, WRG, 0, map, NULL_WAVE, 0, 0, 1); - dc_write_tmpl(dc, 8, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1); - } + dc_link_event(dc, DC_EVT_NL, addr + 2, 3); + dc_link_event(dc, DC_EVT_EOL, addr + 3, 2); + dc_link_event(dc, DC_EVT_NEW_DATA, addr + 1, 1); + + /* Init template microcode */ + dc_write_tmpl(dc, addr + 2, WROD(0), 0, map, SYNC_WAVE, 8, sync, 1); + dc_write_tmpl(dc, addr + 3, WROD(0), 0, map, SYNC_WAVE, 4, sync, 0); + dc_write_tmpl(dc, addr + 4, WRG, 0, map, NULL_WAVE, 0, 0, 1); + dc_write_tmpl(dc, addr + 1, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1); } + dc_link_event(dc, DC_EVT_NF, 0, 0); dc_link_event(dc, DC_EVT_NFIELD, 0, 0); dc_link_event(dc, DC_EVT_EOF, 0, 0); diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 665ab9fd0e01..cbd7c986d926 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -74,9 +74,17 @@ * there can thus be up to three clients: Two vga clients (GPUs) and one audio * client (on the discrete GPU). The code is mostly prepared to support * machines with more than two GPUs should they become available. + * * The GPU to which the outputs are currently switched is called the * active client in vga_switcheroo parlance. The GPU not in use is the - * inactive client. + * inactive client. When the inactive client's DRM driver is loaded, + * it will be unable to probe the panel's EDID and hence depends on + * VBIOS to provide its display modes. If the VBIOS modes are bogus or + * if there is no VBIOS at all (which is common on the MacBook Pro), + * a client may alternatively request that the DDC lines are temporarily + * switched to it, provided that the handler supports this. Switching + * only the DDC lines and not the entire output avoids unnecessary + * flickering. */ /** @@ -126,6 +134,10 @@ static DEFINE_MUTEX(vgasr_mutex); * (counting only vga clients, not audio clients) * @clients: list of registered clients * @handler: registered handler + * @handler_flags: flags of registered handler + * @mux_hw_lock: protects mux state + * (in particular while DDC lines are temporarily switched) + * @old_ddc_owner: client to which DDC lines will be switched back on unlock * * vga_switcheroo private data. Currently only one vga_switcheroo instance * per system is supported. 
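As the vga_switcheroo hunks below show, vga_switcheroo_register_handler() now takes a flags argument so a handler can advertise that it is able to switch the DDC lines on their own. A hypothetical handler registration might look like this (the callbacks are placeholders, and the VGA_SWITCHEROO_CAN_SWITCH_DDC flag name is assumed from the new vga_switcheroo_handler_flags_t enum rather than shown in this hunk):

static const struct vga_switcheroo_handler my_handler = {
        .switchto       = my_switchto,
        .switch_ddc     = my_switch_ddc,        /* new, optional callback */
        .power_state    = my_power_state,
        .get_client_id  = my_get_client_id,
};

static int my_handler_register(void)
{
        return vga_switcheroo_register_handler(&my_handler,
                                               VGA_SWITCHEROO_CAN_SWITCH_DDC);
}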
@@ -142,6 +154,9 @@ struct vgasr_priv { struct list_head clients; const struct vga_switcheroo_handler *handler; + enum vga_switcheroo_handler_flags_t handler_flags; + struct mutex mux_hw_lock; + int old_ddc_owner; }; #define ID_BIT_AUDIO 0x100 @@ -156,6 +171,7 @@ static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv); /* only one switcheroo per system */ static struct vgasr_priv vgasr_priv = { .clients = LIST_HEAD_INIT(vgasr_priv.clients), + .mux_hw_lock = __MUTEX_INITIALIZER(vgasr_priv.mux_hw_lock), }; static bool vga_switcheroo_ready(void) @@ -190,13 +206,15 @@ static void vga_switcheroo_enable(void) /** * vga_switcheroo_register_handler() - register handler * @handler: handler callbacks + * @handler_flags: handler flags * * Register handler. Enable vga_switcheroo if two vga clients have already * registered. * * Return: 0 on success, -EINVAL if a handler was already registered. */ -int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) +int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler, + enum vga_switcheroo_handler_flags_t handler_flags) { mutex_lock(&vgasr_mutex); if (vgasr_priv.handler) { @@ -205,6 +223,7 @@ int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler } vgasr_priv.handler = handler; + vgasr_priv.handler_flags = handler_flags; if (vga_switcheroo_ready()) { pr_info("enabled\n"); vga_switcheroo_enable(); @@ -222,16 +241,33 @@ EXPORT_SYMBOL(vga_switcheroo_register_handler); void vga_switcheroo_unregister_handler(void) { mutex_lock(&vgasr_mutex); + mutex_lock(&vgasr_priv.mux_hw_lock); + vgasr_priv.handler_flags = 0; vgasr_priv.handler = NULL; if (vgasr_priv.active) { pr_info("disabled\n"); vga_switcheroo_debugfs_fini(&vgasr_priv); vgasr_priv.active = false; } + mutex_unlock(&vgasr_priv.mux_hw_lock); mutex_unlock(&vgasr_mutex); } EXPORT_SYMBOL(vga_switcheroo_unregister_handler); +/** + * vga_switcheroo_handler_flags() - obtain handler flags + * + * Helper for clients to obtain the handler flags bitmask. + * + * Return: Handler flags. A value of 0 means that no handler is registered + * or that the handler has no special capabilities. + */ +enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) +{ + return vgasr_priv.handler_flags; +} +EXPORT_SYMBOL(vga_switcheroo_handler_flags); + static int register_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, enum vga_switcheroo_client_id id, bool active, @@ -413,6 +449,76 @@ void vga_switcheroo_client_fb_set(struct pci_dev *pdev, EXPORT_SYMBOL(vga_switcheroo_client_fb_set); /** + * vga_switcheroo_lock_ddc() - temporarily switch DDC lines to a given client + * @pdev: client pci device + * + * Temporarily switch DDC lines to the client identified by @pdev + * (but leave the outputs otherwise switched to where they are). + * This allows the inactive client to probe EDID. The DDC lines must + * afterwards be switched back by calling vga_switcheroo_unlock_ddc(), + * even if this function returns an error. + * + * Return: Previous DDC owner on success or a negative int on error. + * Specifically, %-ENODEV if no handler has registered or if the handler + * does not support switching the DDC lines. Also, a negative value + * returned by the handler is propagated back to the caller. + * The return value has merely an informational purpose for any caller + * which might be interested in it. 
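From the client side, the new DDC locking is meant to bracket an EDID probe exactly as the kernel-doc above and below describes; in this sketch the connector, adapter and fallback are placeholders:

static void probe_inactive_gpu_edid(struct pci_dev *pdev,
                                    struct drm_connector *connector,
                                    struct i2c_adapter *ddc_adapter)
{
        struct edid *edid;

        /* The return value may be ignored: if switching fails, the
         * read below simply comes back empty.
         */
        vga_switcheroo_lock_ddc(pdev);
        edid = drm_get_edid(connector, ddc_adapter);
        vga_switcheroo_unlock_ddc(pdev);        /* required even on failure */

        if (!edid)
                use_vbios_modes(connector);     /* hypothetical fallback */
        else
                kfree(edid);                    /* or feed it to the probe */
}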
It is acceptable to ignore the return + * value and simply rely on the result of the subsequent EDID probe, + * which will be %NULL if DDC switching failed. + */ +int vga_switcheroo_lock_ddc(struct pci_dev *pdev) +{ + enum vga_switcheroo_client_id id; + + mutex_lock(&vgasr_priv.mux_hw_lock); + if (!vgasr_priv.handler || !vgasr_priv.handler->switch_ddc) { + vgasr_priv.old_ddc_owner = -ENODEV; + return -ENODEV; + } + + id = vgasr_priv.handler->get_client_id(pdev); + vgasr_priv.old_ddc_owner = vgasr_priv.handler->switch_ddc(id); + return vgasr_priv.old_ddc_owner; +} +EXPORT_SYMBOL(vga_switcheroo_lock_ddc); + +/** + * vga_switcheroo_unlock_ddc() - switch DDC lines back to previous owner + * @pdev: client pci device + * + * Switch DDC lines back to the previous owner after calling + * vga_switcheroo_lock_ddc(). This must be called even if + * vga_switcheroo_lock_ddc() returned an error. + * + * Return: Previous DDC owner on success (i.e. the client identifier of @pdev) + * or a negative int on error. + * Specifically, %-ENODEV if no handler has registered or if the handler + * does not support switching the DDC lines. Also, a negative value + * returned by the handler is propagated back to the caller. + * Finally, invoking this function without calling vga_switcheroo_lock_ddc() + * first is not allowed and will result in %-EINVAL. + */ +int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) +{ + enum vga_switcheroo_client_id id; + int ret = vgasr_priv.old_ddc_owner; + + if (WARN_ON_ONCE(!mutex_is_locked(&vgasr_priv.mux_hw_lock))) + return -EINVAL; + + if (vgasr_priv.old_ddc_owner >= 0) { + id = vgasr_priv.handler->get_client_id(pdev); + if (vgasr_priv.old_ddc_owner != id) + ret = vgasr_priv.handler->switch_ddc( + vgasr_priv.old_ddc_owner); + } + mutex_unlock(&vgasr_priv.mux_hw_lock); + return ret; +} +EXPORT_SYMBOL(vga_switcheroo_unlock_ddc); + +/** * DOC: Manual switching and manual power control * * In this mode of use, the file /sys/kernel/debug/vgaswitcheroo/switch @@ -549,7 +655,9 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) console_unlock(); } + mutex_lock(&vgasr_priv.mux_hw_lock); ret = vgasr_priv.handler->switchto(new_client->id); + mutex_unlock(&vgasr_priv.mux_hw_lock); if (ret) return ret; @@ -664,7 +772,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, vgasr_priv.delayed_switch_active = false; if (just_mux) { + mutex_lock(&vgasr_priv.mux_hw_lock); ret = vgasr_priv.handler->switchto(client_id); + mutex_unlock(&vgasr_priv.mux_hw_lock); goto out; } @@ -876,8 +986,11 @@ static int vga_switcheroo_runtime_suspend(struct device *dev) if (ret) return ret; mutex_lock(&vgasr_mutex); - if (vgasr_priv.handler->switchto) + if (vgasr_priv.handler->switchto) { + mutex_lock(&vgasr_priv.mux_hw_lock); vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD); + mutex_unlock(&vgasr_priv.mux_hw_lock); + } vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF); mutex_unlock(&vgasr_mutex); return 0; diff --git a/drivers/media/common/b2c2/flexcop-fe-tuner.c b/drivers/media/common/b2c2/flexcop-fe-tuner.c index 9c59f4306883..f5956402fc69 100644 --- a/drivers/media/common/b2c2/flexcop-fe-tuner.c +++ b/drivers/media/common/b2c2/flexcop-fe-tuner.c @@ -38,7 +38,7 @@ static int flexcop_fe_request_firmware(struct dvb_frontend *fe, #endif /* lnb control */ -#if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299) +#if (FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)) && FE_SUPPORTED(PLL) static int flexcop_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage) { @@ 
-68,7 +68,7 @@ static int flexcop_set_voltage(struct dvb_frontend *fe, #endif #if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312) -static int flexcop_sleep(struct dvb_frontend* fe) +static int __maybe_unused flexcop_sleep(struct dvb_frontend* fe) { struct flexcop_device *fc = fe->dvb->priv; if (fc->fe_sleep) diff --git a/drivers/media/common/b2c2/flexcop.c b/drivers/media/common/b2c2/flexcop.c index 412c5daf2b48..0f5114d406f8 100644 --- a/drivers/media/common/b2c2/flexcop.c +++ b/drivers/media/common/b2c2/flexcop.c @@ -1,7 +1,7 @@ /* * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III * flexcop.c - main module part - * Copyright (C) 2004-9 Patrick Boettcher <patrick.boettcher@desy.de> + * Copyright (C) 2004-9 Patrick Boettcher <patrick.boettcher@posteo.de> * based on skystar2-driver Copyright (C) 2003 Vadim Catana, skystar@moldova.cc * * Acknowledgements: @@ -34,7 +34,7 @@ #include "flexcop.h" #define DRIVER_NAME "B2C2 FlexcopII/II(b)/III digital TV receiver chip" -#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de" +#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de" #ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG #define DEBSTATUS "" diff --git a/drivers/media/common/cypress_firmware.c b/drivers/media/common/cypress_firmware.c index 577e82058fdc..50e3f76d4847 100644 --- a/drivers/media/common/cypress_firmware.c +++ b/drivers/media/common/cypress_firmware.c @@ -1,6 +1,6 @@ /* cypress_firmware.c is part of the DVB USB library. * - * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * This file contains functions for downloading the firmware to Cypress FX 1 diff --git a/drivers/media/common/cypress_firmware.h b/drivers/media/common/cypress_firmware.h index e493cbc7a528..1e4f27356205 100644 --- a/drivers/media/common/cypress_firmware.h +++ b/drivers/media/common/cypress_firmware.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. 
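The flexcop hunk above keeps flexcop_sleep() compiled for every frontend combination and tags it __maybe_unused rather than adding another layer of #if guards; in general the annotation is just:

/* Only referenced under some CONFIG_* combinations; __maybe_unused
 * silences -Wunused-function for the rest without hiding real bugs
 * behind broader #ifdefs.
 */
static int __maybe_unused helper_used_conditionally(void)
{
        return 0;
}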
* * This file contains functions for downloading the firmware to Cypress FX 1 diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c index 2a8d9a36d6f0..f3a42834d7d6 100644 --- a/drivers/media/common/siano/smscoreapi.c +++ b/drivers/media/common/siano/smscoreapi.c @@ -1167,8 +1167,8 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev, return rc; } pr_debug("read fw %s, buffer size=0x%zx\n", fw_filename, fw->size); - fw_buf = kmalloc(ALIGN(fw->size, SMS_ALLOC_ALIGNMENT), - GFP_KERNEL | GFP_DMA); + fw_buf = kmalloc(ALIGN(fw->size + sizeof(struct sms_firmware), + SMS_ALLOC_ALIGNMENT), GFP_KERNEL | GFP_DMA); if (!fw_buf) { pr_err("failed to allocate firmware buffer\n"); rc = -ENOMEM; diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c index d31f468830cf..9148e14c9d07 100644 --- a/drivers/media/common/siano/smsdvb-main.c +++ b/drivers/media/common/siano/smsdvb-main.c @@ -1015,12 +1015,6 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe) } } -/* Nothing to do here, as stats are automatically updated */ -static int smsdvb_get_frontend(struct dvb_frontend *fe) -{ - return 0; -} - static int smsdvb_init(struct dvb_frontend *fe) { struct smsdvb_client_t *client = @@ -1069,7 +1063,6 @@ static struct dvb_frontend_ops smsdvb_fe_ops = { .release = smsdvb_release, .set_frontend = smsdvb_set_frontend, - .get_frontend = smsdvb_get_frontend, .get_tune_settings = smsdvb_get_tune_settings, .read_status = smsdvb_read_status, diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h index 1c1c298d2289..dbdbb84294c5 100644 --- a/drivers/media/dvb-core/dvb-usb-ids.h +++ b/drivers/media/dvb-core/dvb-usb-ids.h @@ -1,6 +1,6 @@ /* dvb-usb-ids.h is part of the DVB USB library. * - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) see + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) see * dvb-usb-init.c for copyright information. 
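The smsdvb hunk above can drop its do-nothing get_frontend() stub because the core only syncs DVBv3 parameters when a driver genuinely implements the hook; dvb_frontend.c gates this on a helper that is, in essence:

static bool has_get_frontend(struct dvb_frontend *fe)
{
        /* an empty stub would defeat this check, so don't provide one */
        return fe->ops.get_frontend != NULL;
}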
* * a header file containing define's for the USB device supported by the @@ -118,6 +118,7 @@ #define USB_PID_DIBCOM_STK807XP 0x1f90 #define USB_PID_DIBCOM_STK807XPVR 0x1f98 #define USB_PID_DIBCOM_STK8096GP 0x1fa0 +#define USB_PID_DIBCOM_STK8096PVR 0x1faa #define USB_PID_DIBCOM_NIM8096MD 0x1fa8 #define USB_PID_DIBCOM_TFE8096P 0x1f9C #define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131 @@ -247,6 +248,7 @@ #define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d #define USB_PID_TECHNOTREND_CONNECT_S2_4600 0x3011 #define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012 +#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2 0x3015 #define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014 #define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a #define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081 @@ -255,6 +257,10 @@ #define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062 #define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078 #define USB_PID_TERRATEC_CINERGY_T_XXS_2 0x00ab +#define USB_PID_TERRATEC_CINERGY_S2_R1 0x00a8 +#define USB_PID_TERRATEC_CINERGY_S2_R2 0x00b0 +#define USB_PID_TERRATEC_CINERGY_S2_R3 0x0102 +#define USB_PID_TERRATEC_CINERGY_S2_R4 0x0105 #define USB_PID_TERRATEC_H7 0x10b4 #define USB_PID_TERRATEC_H7_2 0x10a3 #define USB_PID_TERRATEC_H7_3 0x10a5 diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 40080645341e..4c35eb47472b 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -140,9 +140,12 @@ struct dvb_frontend_private { static void dvb_frontend_wakeup(struct dvb_frontend *fe); static int dtv_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c, struct dvb_frontend_parameters *p_out); -static int dtv_property_legacy_params_sync(struct dvb_frontend *fe, - struct dvb_frontend_parameters *p); +static int +dtv_property_legacy_params_sync(struct dvb_frontend *fe, + const struct dtv_frontend_properties *c, + struct dvb_frontend_parameters *p); static bool has_get_frontend(struct dvb_frontend *fe) { @@ -202,6 +205,7 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe, enum fe_status status) { struct dvb_frontend_private *fepriv = fe->frontend_priv; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_fe_events *events = &fepriv->events; struct dvb_frontend_event *e; int wp; @@ -209,7 +213,7 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe, dev_dbg(fe->dvb->device, "%s:\n", __func__); if ((status & FE_HAS_LOCK) && has_get_frontend(fe)) - dtv_get_frontend(fe, &fepriv->parameters_out); + dtv_get_frontend(fe, c, &fepriv->parameters_out); mutex_lock(&events->mtx); @@ -687,6 +691,7 @@ static int dvb_enable_media_tuner(struct dvb_frontend *fe) static int dvb_frontend_thread(void *data) { struct dvb_frontend *fe = data; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_frontend_private *fepriv = fe->frontend_priv; enum fe_status s; enum dvbfe_algo algo; @@ -807,7 +812,7 @@ restart: fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN; fepriv->delay = HZ / 2; } - dtv_property_legacy_params_sync(fe, &fepriv->parameters_out); + dtv_property_legacy_params_sync(fe, c, &fepriv->parameters_out); fe->ops.read_status(fe, &s); if (s != fepriv->status) { dvb_frontend_add_event(fe, s); /* update event list */ @@ -899,10 +904,10 @@ void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec) s32 delta; *waketime = ktime_add_us(*waketime, add_usec); - delta = ktime_us_delta(ktime_get_real(), *waketime); + delta = ktime_us_delta(ktime_get_boottime(), *waketime); if 
(delta > 2500) { msleep((delta - 1500) / 1000); - delta = ktime_us_delta(ktime_get_real(), *waketime); + delta = ktime_us_delta(ktime_get_boottime(), *waketime); } if (delta > 0) udelay(delta); @@ -1162,18 +1167,24 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = { _DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT, 0, 0), }; -static void dtv_property_dump(struct dvb_frontend *fe, struct dtv_property *tvp) +static void dtv_property_dump(struct dvb_frontend *fe, + bool is_set, + struct dtv_property *tvp) { int i; if (tvp->cmd <= 0 || tvp->cmd > DTV_MAX_COMMAND) { - dev_warn(fe->dvb->device, "%s: tvp.cmd = 0x%08x undefined\n", - __func__, tvp->cmd); + dev_warn(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x undefined\n", + __func__, + is_set ? "SET" : "GET", + tvp->cmd); return; } - dev_dbg(fe->dvb->device, "%s: tvp.cmd = 0x%08x (%s)\n", __func__, - tvp->cmd, dtv_cmds[tvp->cmd].name); + dev_dbg(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x (%s)\n", __func__, + is_set ? "SET" : "GET", + tvp->cmd, + dtv_cmds[tvp->cmd].name); if (dtv_cmds[tvp->cmd].buffer) { dev_dbg(fe->dvb->device, "%s: tvp.u.buffer.len = 0x%02x\n", @@ -1268,11 +1279,11 @@ static int dtv_property_cache_sync(struct dvb_frontend *fe, /* Ensure the cached values are set correctly in the frontend * legacy tuning structures, for the advanced tuning API. */ -static int dtv_property_legacy_params_sync(struct dvb_frontend *fe, - struct dvb_frontend_parameters *p) +static int +dtv_property_legacy_params_sync(struct dvb_frontend *fe, + const struct dtv_frontend_properties *c, + struct dvb_frontend_parameters *p) { - const struct dtv_frontend_properties *c = &fe->dtv_property_cache; - p->frequency = c->frequency; p->inversion = c->inversion; @@ -1344,16 +1355,17 @@ static int dtv_property_legacy_params_sync(struct dvb_frontend *fe, * If p_out is not null, it will update the DVBv3 params pointed by it. */ static int dtv_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c, struct dvb_frontend_parameters *p_out) { int r; if (fe->ops.get_frontend) { - r = fe->ops.get_frontend(fe); + r = fe->ops.get_frontend(fe, c); if (unlikely(r < 0)) return r; if (p_out) - dtv_property_legacy_params_sync(fe, p_out); + dtv_property_legacy_params_sync(fe, c, p_out); return 0; } @@ -1589,7 +1601,7 @@ static int dtv_property_process_get(struct dvb_frontend *fe, return r; } - dtv_property_dump(fe, tvp); + dtv_property_dump(fe, false, tvp); return 0; } @@ -1830,6 +1842,8 @@ static int dtv_property_process_set(struct dvb_frontend *fe, return r; } + dtv_property_dump(fe, true, tvp); + switch(tvp->cmd) { case DTV_CLEAR: /* @@ -2073,6 +2087,8 @@ static int dvb_frontend_ioctl_properties(struct file *file, dev_dbg(fe->dvb->device, "%s: Property cache is full, tuning\n", __func__); } else if (cmd == FE_GET_PROPERTY) { + struct dtv_frontend_properties getp = fe->dtv_property_cache; + dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num); dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props); @@ -2094,17 +2110,18 @@ static int dvb_frontend_ioctl_properties(struct file *file, } /* - * Fills the cache out struct with the cache contents, plus - * the data retrieved from get_frontend, if the frontend - * is not idle. Otherwise, returns the cached content + * Let's use our own copy of property cache, in order to + * avoid mangling with DTV zigzag logic, as drivers might + * return crap, if they don't check if the data is available + * before updating the properties cache. 
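Every demodulator update later in this series follows from the signature change visible here: get_frontend() now fills a property struct supplied by the core instead of writing into fe->dtv_property_cache directly. For a driver the conversion is mechanical; a sketch with hypothetical names:

static int mydemod_get_frontend(struct dvb_frontend *fe,
                                struct dtv_frontend_properties *c)
{
        struct mydemod_state *state = fe->demodulator_priv;

        /* report only what the hardware can actually tell us right now */
        c->frequency    = state->tuned_frequency;
        c->bandwidth_hz = state->bandwidth_hz;
        c->modulation   = state->modulation;

        return 0;
}

static struct dvb_frontend_ops mydemod_ops = {
        /* ... */
        .get_frontend   = mydemod_get_frontend,
};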
*/ if (fepriv->state != FESTATE_IDLE) { - err = dtv_get_frontend(fe, NULL); + err = dtv_get_frontend(fe, &getp, NULL); if (err < 0) goto out; } for (i = 0; i < tvps->num; i++) { - err = dtv_property_process_get(fe, c, tvp + i, file); + err = dtv_property_process_get(fe, &getp, tvp + i, file); if (err < 0) goto out; (tvp + i)->result = err; @@ -2139,7 +2156,7 @@ static int dtv_set_frontend(struct dvb_frontend *fe) * the user. FE_SET_FRONTEND triggers an initial frontend event * with status = 0, which copies output parameters to userspace. */ - dtv_property_legacy_params_sync(fe, &fepriv->parameters_out); + dtv_property_legacy_params_sync(fe, c, &fepriv->parameters_out); /* * Be sure that the bandwidth will be filled for all @@ -2451,7 +2468,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file, u8 last = 1; if (dvb_frontend_debug) printk("%s switch command: 0x%04lx\n", __func__, swcmd); - nexttime = ktime_get_real(); + nexttime = ktime_get_boottime(); if (dvb_frontend_debug) tv[0] = nexttime; /* before sending a command, initialize by sending @@ -2462,7 +2479,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file, for (i = 0; i < 9; i++) { if (dvb_frontend_debug) - tv[i+1] = ktime_get_real(); + tv[i+1] = ktime_get_boottime(); if ((swcmd & 0x01) != last) { /* set voltage to (last ? 13V : 18V) */ fe->ops.set_voltage(fe, (last) ? SEC_VOLTAGE_13 : SEC_VOLTAGE_18); @@ -2509,10 +2526,18 @@ static int dvb_frontend_ioctl_legacy(struct file *file, err = dvb_frontend_get_event (fe, parg, file->f_flags); break; - case FE_GET_FRONTEND: - err = dtv_get_frontend(fe, parg); - break; + case FE_GET_FRONTEND: { + struct dtv_frontend_properties getp = fe->dtv_property_cache; + /* + * Let's use our own copy of property cache, in order to + * avoid mangling with DTV zigzag logic, as drivers might + * return crap, if they don't check if the data is available + * before updating the properties cache. 
+ */ + err = dtv_get_frontend(fe, &getp, parg); + break; + } case FE_SET_FRONTEND_TUNE_MODE: fepriv->tune_mode_flags = (unsigned long) parg; err = 0; diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h index 458bcce20e38..9592573a0b41 100644 --- a/drivers/media/dvb-core/dvb_frontend.h +++ b/drivers/media/dvb-core/dvb_frontend.h @@ -449,7 +449,8 @@ struct dvb_frontend_ops { int (*set_frontend)(struct dvb_frontend *fe); int (*get_tune_settings)(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* settings); - int (*get_frontend)(struct dvb_frontend *fe); + int (*get_frontend)(struct dvb_frontend *fe, + struct dtv_frontend_properties *props); int (*read_status)(struct dvb_frontend *fe, enum fe_status *status); int (*read_ber)(struct dvb_frontend* fe, u32* ber); diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index 560450a0b32a..1b9732ee0a4f 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -58,7 +58,7 @@ static const char * const dnames[] = { #define DVB_MAX_IDS MAX_DVB_MINORS #else #define DVB_MAX_IDS 4 -#define nums2minor(num,type,id) ((num << 6) | (id << 4) | type) +#define nums2minor(num, type, id) ((num << 6) | (id << 4) | type) #define MAX_DVB_MINORS (DVB_MAX_ADAPTERS*64) #endif @@ -85,7 +85,7 @@ static int dvb_device_open(struct inode *inode, struct file *file) file->private_data = dvbdev; replace_fops(file, new_fops); if (file->f_op->open) - err = file->f_op->open(inode,file); + err = file->f_op->open(inode, file); up_read(&minor_rwsem); mutex_unlock(&dvbdev_mutex); return err; @@ -352,7 +352,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev, ret = media_device_register_entity(dvbdev->adapter->mdev, dvbdev->entity); if (ret) - return (ret); + return ret; printk(KERN_DEBUG "%s: media entity '%s' registered.\n", __func__, dvbdev->entity->name); @@ -620,8 +620,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap, return -ENOMEM; adap->conn = conn; - adap->conn_pads = kcalloc(1, sizeof(*adap->conn_pads), - GFP_KERNEL); + adap->conn_pads = kzalloc(sizeof(*adap->conn_pads), GFP_KERNEL); if (!adap->conn_pads) return -ENOMEM; @@ -661,7 +660,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap, if (ntuner && ndemod) { ret = media_create_pad_links(mdev, MEDIA_ENT_F_TUNER, - tuner, TUNER_PAD_IF_OUTPUT, + tuner, TUNER_PAD_OUTPUT, MEDIA_ENT_F_DTV_DEMOD, demod, 0, MEDIA_LNK_FL_ENABLED, false); @@ -868,7 +867,7 @@ int dvb_usercopy(struct file *file, parg = sbuf; } else { /* too big to allocate from stack */ - mbuf = kmalloc(_IOC_SIZE(cmd),GFP_KERNEL); + mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); if (NULL == mbuf) return -ENOMEM; parg = mbuf; diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c index e23197da84af..8bcde336ffd7 100644 --- a/drivers/media/dvb-frontends/af9013.c +++ b/drivers/media/dvb-frontends/af9013.c @@ -866,9 +866,9 @@ err: return ret; } -static int af9013_get_frontend(struct dvb_frontend *fe) +static int af9013_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { - struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct af9013_state *state = fe->demodulator_priv; int ret; u8 buf[3]; @@ -1344,6 +1344,10 @@ err: static void af9013_release(struct dvb_frontend *fe) { struct af9013_state *state = fe->demodulator_priv; + + /* stop statistics polling */ + cancel_delayed_work_sync(&state->statistics_work); + kfree(state); } diff --git a/drivers/media/dvb-frontends/af9033.c 
b/drivers/media/dvb-frontends/af9033.c index bc35206a0821..efebe5ce2429 100644 --- a/drivers/media/dvb-frontends/af9033.c +++ b/drivers/media/dvb-frontends/af9033.c @@ -691,10 +691,10 @@ err: return ret; } -static int af9033_get_frontend(struct dvb_frontend *fe) +static int af9033_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct af9033_dev *dev = fe->demodulator_priv; - struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret; u8 buf[8]; @@ -1372,6 +1372,9 @@ static int af9033_remove(struct i2c_client *client) dev_dbg(&dev->client->dev, "\n"); + /* stop statistics polling */ + cancel_delayed_work_sync(&dev->stat_work); + dev->fe.ops.release = NULL; dev->fe.demodulator_priv = NULL; kfree(dev); diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c index 544c5f65d19a..9412fcd1bddb 100644 --- a/drivers/media/dvb-frontends/as102_fe.c +++ b/drivers/media/dvb-frontends/as102_fe.c @@ -190,10 +190,10 @@ static int as102_fe_set_frontend(struct dvb_frontend *fe) return state->ops->set_tune(state->priv, &tune_args); } -static int as102_fe_get_frontend(struct dvb_frontend *fe) +static int as102_fe_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct as102_state *state = fe->demodulator_priv; - struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret = 0; struct as10x_tps tps = { 0 }; diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c index 8fe552e293ed..47248b868e38 100644 --- a/drivers/media/dvb-frontends/atbm8830.c +++ b/drivers/media/dvb-frontends/atbm8830.c @@ -297,9 +297,9 @@ static int atbm8830_set_fe(struct dvb_frontend *fe) return 0; } -static int atbm8830_get_fe(struct dvb_frontend *fe) +static int atbm8830_get_fe(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { - struct dtv_frontend_properties *c = &fe->dtv_property_cache; dprintk("%s\n", __func__); /* TODO: get real readings from device */ diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c index 6c1e97640f3f..e676b9461a59 100644 --- a/drivers/media/dvb-frontends/au8522_dig.c +++ b/drivers/media/dvb-frontends/au8522_dig.c @@ -816,9 +816,9 @@ static int au8522_read_ber(struct dvb_frontend *fe, u32 *ber) return au8522_read_ucblocks(fe, ber); } -static int au8522_get_frontend(struct dvb_frontend *fe) +static int au8522_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { - struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct au8522_state *state = fe->demodulator_priv; c->frequency = state->current_frequency; diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c index d30275f27644..bb698839e477 100644 --- a/drivers/media/dvb-frontends/bcm3510.c +++ b/drivers/media/dvb-frontends/bcm3510.c @@ -3,7 +3,7 @@ * * Copyright (C) 2001-5, B2C2 inc. * - * GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@desy.de> + * GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@posteo.de> * * This driver is "hard-coded" to be used with the 1st generation of * Technisat/B2C2's Air2PC ATSC PCI/USB cards/boxes. 
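The af9013 and af9033 hunks above fix the same class of bug: the statistics poller is a self-rearming delayed work, so it must be cancelled synchronously before the state it dereferences is freed. Schematically (hypothetical names):

static void mydemod_stat_work(struct work_struct *work)
{
        struct mydemod_state *state =
                container_of(work, struct mydemod_state, stat_work.work);

        mydemod_read_statistics(state);
        /* re-arm: poll again in two seconds */
        schedule_delayed_work(&state->stat_work, msecs_to_jiffies(2000));
}

static void mydemod_release(struct dvb_frontend *fe)
{
        struct mydemod_state *state = fe->demodulator_priv;

        cancel_delayed_work_sync(&state->stat_work);    /* stop the poller */
        kfree(state);                                   /* ...then free */
}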
The pll-programming @@ -865,5 +865,5 @@ static struct dvb_frontend_ops bcm3510_ops = { }; MODULE_DESCRIPTION("Broadcom BCM3510 ATSC (8VSB/16VSB & ITU J83 AnnexB FEC QAM64/256) demodulator driver"); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb-frontends/bcm3510.h b/drivers/media/dvb-frontends/bcm3510.h index ff66492fb940..961c2eb87c68 100644 --- a/drivers/media/dvb-frontends/bcm3510.h +++ b/drivers/media/dvb-frontends/bcm3510.h @@ -3,7 +3,7 @@ * * Copyright (C) 2001-5, B2C2 inc. * - * GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@desy.de> + * GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@posteo.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/media/dvb-frontends/bcm3510_priv.h b/drivers/media/dvb-frontends/bcm3510_priv.h index 3bb1bc2a04f0..67f24686c31b 100644 --- a/drivers/media/dvb-frontends/bcm3510_priv.h +++ b/drivers/media/dvb-frontends/bcm3510_priv.h @@ -3,7 +3,7 @@ * * Copyright (C) 2001-5, B2C2 inc. * - * GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@desy.de> + * GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@posteo.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c index fd033cca6e11..5cad925609e0 100644 --- a/drivers/media/dvb-frontends/cx22700.c +++ b/drivers/media/dvb-frontends/cx22700.c @@ -345,9 +345,9 @@ static int cx22700_set_frontend(struct dvb_frontend *fe) return 0; } -static int cx22700_get_frontend(struct dvb_frontend *fe) +static int cx22700_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { - struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct cx22700_state* state = fe->demodulator_priv; u8 reg09 = cx22700_readreg (state, 0x09); diff --git a/drivers/media/dvb-frontends/cx22702.c b/drivers/media/dvb-frontends/cx22702.c index d2d06dcd7683..c0e54c59cccf 100644 --- a/drivers/media/dvb-frontends/cx22702.c +++ b/drivers/media/dvb-frontends/cx22702.c @@ -562,9 +562,9 @@ static int cx22702_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) return 0; } -static int cx22702_get_frontend(struct dvb_frontend *fe) +static int cx22702_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { - struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct cx22702_state *state = fe->demodulator_priv; u8 reg0C = cx22702_readreg(state, 0x0C); diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c index cb36475e322b..6cb81ec12847 100644 --- a/drivers/media/dvb-frontends/cx24110.c +++ b/drivers/media/dvb-frontends/cx24110.c @@ -550,9 +550,9 @@ static int cx24110_set_frontend(struct dvb_frontend *fe) return 0; } -static int cx24110_get_frontend(struct dvb_frontend *fe) +static int cx24110_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct cx24110_state *state = fe->demodulator_priv; s32 afc; unsigned sclk; diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c index 5f77bc80a896..a3f7eb4e609d 100644 --- a/drivers/media/dvb-frontends/cx24117.c 
+++ b/drivers/media/dvb-frontends/cx24117.c @@ -1560,10 +1560,10 @@ static int cx24117_get_algo(struct dvb_frontend *fe) return DVBFE_ALGO_HW; } -static int cx24117_get_frontend(struct dvb_frontend *fe) +static int cx24117_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct cx24117_state *state = fe->demodulator_priv; - struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct cx24117_cmd cmd; u8 reg, st, inv; int ret, idx; diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c index 3b0ef52bb834..6ccbd86c9490 100644 --- a/drivers/media/dvb-frontends/cx24120.c +++ b/drivers/media/dvb-frontends/cx24120.c @@ -1502,9 +1502,9 @@ static int cx24120_sleep(struct dvb_frontend *fe) return 0; } -static int cx24120_get_frontend(struct dvb_frontend *fe) +static int cx24120_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { - struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct cx24120_state *state = fe->demodulator_priv; u8 freq1, freq2, freq3; diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c index 0fe7fb11124b..113b0949408a 100644 --- a/drivers/media/dvb-frontends/cx24123.c +++ b/drivers/media/dvb-frontends/cx24123.c @@ -945,9 +945,9 @@ static int cx24123_set_frontend(struct dvb_frontend *fe) return 0; } -static int cx24123_get_frontend(struct dvb_frontend *fe) +static int cx24123_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct cx24123_state *state = fe->demodulator_priv; dprintk("\n"); diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c index 42fad6aa3958..a674a6312c38 100644 --- a/drivers/media/dvb-frontends/cxd2820r_c.c +++ b/drivers/media/dvb-frontends/cxd2820r_c.c @@ -101,10 +101,10 @@ error: return ret; } -int cxd2820r_get_frontend_c(struct dvb_frontend *fe) +int cxd2820r_get_frontend_c(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct cxd2820r_priv *priv = fe->demodulator_priv; - struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret; u8 buf[2]; diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c index 24a457d9d803..314d3b8c1080 100644 --- a/drivers/media/dvb-frontends/cxd2820r_core.c +++ b/drivers/media/dvb-frontends/cxd2820r_core.c @@ -313,7 +313,8 @@ static int cxd2820r_read_status(struct dvb_frontend *fe, enum fe_status *status) return ret; } -static int cxd2820r_get_frontend(struct dvb_frontend *fe) +static int cxd2820r_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { struct cxd2820r_priv *priv = fe->demodulator_priv; int ret; @@ -326,13 +327,13 @@ static int cxd2820r_get_frontend(struct dvb_frontend *fe) switch (fe->dtv_property_cache.delivery_system) { case SYS_DVBT: - ret = cxd2820r_get_frontend_t(fe); + ret = cxd2820r_get_frontend_t(fe, p); break; case SYS_DVBT2: - ret = cxd2820r_get_frontend_t2(fe); + ret = cxd2820r_get_frontend_t2(fe, p); break; case SYS_DVBC_ANNEX_A: - ret = cxd2820r_get_frontend_c(fe); + ret = cxd2820r_get_frontend_c(fe, p); break; default: ret = -EINVAL; @@ -606,8 +607,7 @@ static int cxd2820r_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) static int cxd2820r_gpio_direction_output(struct gpio_chip *chip, unsigned nr, int val) { - struct cxd2820r_priv *priv = - container_of(chip, struct cxd2820r_priv, gpio_chip); + struct cxd2820r_priv *priv = 
gpiochip_get_data(chip); u8 gpio[GPIO_COUNT]; dev_dbg(&priv->i2c->dev, "%s: nr=%d val=%d\n", __func__, nr, val); @@ -620,8 +620,7 @@ static int cxd2820r_gpio_direction_output(struct gpio_chip *chip, unsigned nr, static void cxd2820r_gpio_set(struct gpio_chip *chip, unsigned nr, int val) { - struct cxd2820r_priv *priv = - container_of(chip, struct cxd2820r_priv, gpio_chip); + struct cxd2820r_priv *priv = gpiochip_get_data(chip); u8 gpio[GPIO_COUNT]; dev_dbg(&priv->i2c->dev, "%s: nr=%d val=%d\n", __func__, nr, val); @@ -636,8 +635,7 @@ static void cxd2820r_gpio_set(struct gpio_chip *chip, unsigned nr, int val) static int cxd2820r_gpio_get(struct gpio_chip *chip, unsigned nr) { - struct cxd2820r_priv *priv = - container_of(chip, struct cxd2820r_priv, gpio_chip); + struct cxd2820r_priv *priv = gpiochip_get_data(chip); dev_dbg(&priv->i2c->dev, "%s: nr=%d\n", __func__, nr); @@ -731,7 +729,7 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg, priv->gpio_chip.base = -1; /* dynamic allocation */ priv->gpio_chip.ngpio = GPIO_COUNT; priv->gpio_chip.can_sleep = 1; - ret = gpiochip_add(&priv->gpio_chip); + ret = gpiochip_add_data(&priv->gpio_chip, priv); if (ret) goto error; diff --git a/drivers/media/dvb-frontends/cxd2820r_priv.h b/drivers/media/dvb-frontends/cxd2820r_priv.h index a0d53f01a8bf..e31c48e53097 100644 --- a/drivers/media/dvb-frontends/cxd2820r_priv.h +++ b/drivers/media/dvb-frontends/cxd2820r_priv.h @@ -76,7 +76,8 @@ int cxd2820r_rd_reg(struct cxd2820r_priv *priv, u32 reg, u8 *val); /* cxd2820r_c.c */ -int cxd2820r_get_frontend_c(struct dvb_frontend *fe); +int cxd2820r_get_frontend_c(struct dvb_frontend *fe, + struct dtv_frontend_properties *p); int cxd2820r_set_frontend_c(struct dvb_frontend *fe); @@ -99,7 +100,8 @@ int cxd2820r_get_tune_settings_c(struct dvb_frontend *fe, /* cxd2820r_t.c */ -int cxd2820r_get_frontend_t(struct dvb_frontend *fe); +int cxd2820r_get_frontend_t(struct dvb_frontend *fe, + struct dtv_frontend_properties *p); int cxd2820r_set_frontend_t(struct dvb_frontend *fe); @@ -122,7 +124,8 @@ int cxd2820r_get_tune_settings_t(struct dvb_frontend *fe, /* cxd2820r_t2.c */ -int cxd2820r_get_frontend_t2(struct dvb_frontend *fe); +int cxd2820r_get_frontend_t2(struct dvb_frontend *fe, + struct dtv_frontend_properties *p); int cxd2820r_set_frontend_t2(struct dvb_frontend *fe); diff --git a/drivers/media/dvb-frontends/cxd2820r_t.c b/drivers/media/dvb-frontends/cxd2820r_t.c index 21abf1b4ed4d..75ce7d8ded00 100644 --- a/drivers/media/dvb-frontends/cxd2820r_t.c +++ b/drivers/media/dvb-frontends/cxd2820r_t.c @@ -138,10 +138,10 @@ error: return ret; } -int cxd2820r_get_frontend_t(struct dvb_frontend *fe) +int cxd2820r_get_frontend_t(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct cxd2820r_priv *priv = fe->demodulator_priv; - struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret; u8 buf[2]; diff --git a/drivers/media/dvb-frontends/cxd2820r_t2.c b/drivers/media/dvb-frontends/cxd2820r_t2.c index 4e028b41c0d5..704475676234 100644 --- a/drivers/media/dvb-frontends/cxd2820r_t2.c +++ b/drivers/media/dvb-frontends/cxd2820r_t2.c @@ -23,8 +23,8 @@ int cxd2820r_set_frontend_t2(struct dvb_frontend *fe) { - struct cxd2820r_priv *priv = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct cxd2820r_priv *priv = fe->demodulator_priv; int ret, i, bw_i; u32 if_freq, if_ctl; u64 num; @@ -169,10 +169,10 @@ error: } -int cxd2820r_get_frontend_t2(struct dvb_frontend *fe) +int cxd2820r_get_frontend_t2(struct 
dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct cxd2820r_priv *priv = fe->demodulator_priv; - struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret; u8 buf[2]; diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c index fdffb2f0ded8..900186ba8e62 100644 --- a/drivers/media/dvb-frontends/cxd2841er.c +++ b/drivers/media/dvb-frontends/cxd2841er.c @@ -2090,13 +2090,13 @@ static int cxd2841er_sleep_tc_to_active_c(struct cxd2841er_priv *priv, return 0; } -static int cxd2841er_get_frontend(struct dvb_frontend *fe) +static int cxd2841er_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { enum fe_status status = 0; u16 strength = 0, snr = 0; u32 errors = 0, ber = 0; struct cxd2841er_priv *priv = fe->demodulator_priv; - struct dtv_frontend_properties *p = &fe->dtv_property_cache; dev_dbg(&priv->i2c->dev, "%s()\n", __func__); if (priv->state == STATE_ACTIVE_S) diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c index 0b8fb5dd1889..ee7d66997ccd 100644 --- a/drivers/media/dvb-frontends/dib0070.c +++ b/drivers/media/dvb-frontends/dib0070.c @@ -774,6 +774,6 @@ free_mem: } EXPORT_SYMBOL(dib0070_attach); -MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for the DiBcom 0070 base-band RF Tuner"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c index 47cb72243b9d..976ee034a430 100644 --- a/drivers/media/dvb-frontends/dib0090.c +++ b/drivers/media/dvb-frontends/dib0090.c @@ -2669,7 +2669,7 @@ free_mem: } EXPORT_SYMBOL(dib0090_fw_register); -MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); -MODULE_AUTHOR("Olivier Grenie <olivier.grenie@dibcom.fr>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); +MODULE_AUTHOR("Olivier Grenie <olivier.grenie@parrot.com>"); MODULE_DESCRIPTION("Driver for the DiBcom 0090 base-band RF Tuner"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h index 6ae9899b5b45..d5dfafb4ef13 100644 --- a/drivers/media/dvb-frontends/dib3000.h +++ b/drivers/media/dvb-frontends/dib3000.h @@ -2,11 +2,11 @@ * public header file of the frontend drivers for mobile DVB-T demodulators * DiBcom 3000M-B and DiBcom 3000P/M-C (http://www.dibcom.fr/) * - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * based on GPL code from DibCom, which has * - * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr) + * Copyright (C) 2004 Amaury Demol for DiBcom * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -14,7 +14,7 @@ * * Acknowledgements * - * Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver + * Amaury Demol from DiBcom for providing specs and driver * sources, on which this driver (and the dvb-dibusb) are based. 
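The cxd2820r hunks above also convert the demod's GPIO callbacks from container_of() on the embedded gpio_chip to gpiochip_get_data(), which works because registration now goes through gpiochip_add_data() with the driver's private state as the data pointer. A minimal sketch of the pattern follows; foo_priv and the foo_* functions are invented for illustration, while gpiochip_add_data()/gpiochip_get_data() are the real kernel entry points involved:

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/types.h>

struct foo_priv {
	struct gpio_chip gpio_chip;
	u8 gpio_state;
};

static void foo_gpio_set(struct gpio_chip *chip, unsigned int nr, int val)
{
	/* recover the pointer registered with gpiochip_add_data() below */
	struct foo_priv *priv = gpiochip_get_data(chip);

	if (val)
		priv->gpio_state |= BIT(nr);
	else
		priv->gpio_state &= ~BIT(nr);
}

static int foo_register_gpios(struct foo_priv *priv)
{
	priv->gpio_chip.label = "foo";
	priv->gpio_chip.set = foo_gpio_set;
	priv->gpio_chip.base = -1;	/* dynamic allocation */
	priv->gpio_chip.ngpio = 4;
	priv->gpio_chip.can_sleep = 1;

	/* was: gpiochip_add(&priv->gpio_chip) + container_of() in callbacks */
	return gpiochip_add_data(&priv->gpio_chip, priv);
}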
* * see Documentation/dvb/README.dvb-usb for more information diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c index 7a61172d0d45..6821ecb53d63 100644 --- a/drivers/media/dvb-frontends/dib3000mb.c +++ b/drivers/media/dvb-frontends/dib3000mb.c @@ -2,11 +2,11 @@ * Frontend driver for mobile DVB-T demodulator DiBcom 3000M-B * DiBcom (http://www.dibcom.fr/) * - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * based on GPL code from DibCom, which has * - * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr) + * Copyright (C) 2004 Amaury Demol for DiBcom * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -14,7 +14,7 @@ * * Acknowledgements * - * Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver + * Amaury Demol from DiBcom for providing specs and driver * sources, on which this driver (and the dvb-dibusb) are based. * * see Documentation/dvb/README.dvb-usb for more information @@ -36,7 +36,7 @@ /* Version information */ #define DRIVER_VERSION "0.1" #define DRIVER_DESC "DiBcom 3000M-B DVB-T demodulator" -#define DRIVER_AUTHOR "Patrick Boettcher, patrick.boettcher@desy.de" +#define DRIVER_AUTHOR "Patrick Boettcher, patrick.boettcher@posteo.de" static int debug; module_param(debug, int, 0644); @@ -112,7 +112,8 @@ static u16 dib3000_seq[2][2][2] = /* fft,gua, inv */ } }; -static int dib3000mb_get_frontend(struct dvb_frontend* fe); +static int dib3000mb_get_frontend(struct dvb_frontend* fe, + struct dtv_frontend_properties *c); static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner) { @@ -359,7 +360,7 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner) deb_setf("search_state after autosearch %d after %d checks\n",search_state,as_count); if (search_state == 1) { - if (dib3000mb_get_frontend(fe) == 0) { + if (dib3000mb_get_frontend(fe, c) == 0) { deb_setf("reading tuning data from frontend succeeded.\n"); return dib3000mb_set_frontend(fe, 0); } @@ -450,9 +451,9 @@ static int dib3000mb_fe_init(struct dvb_frontend* fe, int mobile_mode) return 0; } -static int dib3000mb_get_frontend(struct dvb_frontend* fe) +static int dib3000mb_get_frontend(struct dvb_frontend* fe, + struct dtv_frontend_properties *c) { - struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dib3000_state* state = fe->demodulator_priv; enum fe_code_rate *cr; u16 tps_val; diff --git a/drivers/media/dvb-frontends/dib3000mb_priv.h b/drivers/media/dvb-frontends/dib3000mb_priv.h index 9dc235aa44b7..0459d5c84314 100644 --- a/drivers/media/dvb-frontends/dib3000mb_priv.h +++ b/drivers/media/dvb-frontends/dib3000mb_priv.h @@ -1,7 +1,7 @@ /* * dib3000mb_priv.h * - * Copyright (C) 2004 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004 Patrick Boettcher (patrick.boettcher@posteo.de) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c index 583d6b7fabed..da0f1dc5aaf7 100644 --- a/drivers/media/dvb-frontends/dib3000mc.c +++ b/drivers/media/dvb-frontends/dib3000mc.c @@ -2,7 +2,7 @@ * Driver for DiBcom DiB3000MC/P-demodulator. 
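Most of the demodulator changes in this series are mechanical conversions of the same kind: dvb_frontend_ops.get_frontend now receives the struct dtv_frontend_properties it is expected to fill, instead of every driver reaching into &fe->dtv_property_cache on its own. A converted callback reduces to something like the sketch below; foo_state and its fields are illustrative, not taken from any of the drivers above:

static int foo_get_frontend(struct dvb_frontend *fe,
			    struct dtv_frontend_properties *p)
{
	struct foo_state *state = fe->demodulator_priv;

	/* report what the hardware is currently tuned to */
	p->frequency = state->current_frequency;
	p->modulation = state->current_modulation;

	return 0;
}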
* * Copyright (C) 2004-7 DiBcom (http://www.dibcom.fr/) - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * This code is partially based on the previous dib3000mc.c . * @@ -636,9 +636,9 @@ struct i2c_adapter * dib3000mc_get_tuner_i2c_master(struct dvb_frontend *demod, EXPORT_SYMBOL(dib3000mc_get_tuner_i2c_master); -static int dib3000mc_get_frontend(struct dvb_frontend* fe) +static int dib3000mc_get_frontend(struct dvb_frontend* fe, + struct dtv_frontend_properties *fep) { - struct dtv_frontend_properties *fep = &fe->dtv_property_cache; struct dib3000mc_state *state = fe->demodulator_priv; u16 tps = dib3000mc_read_word(state,458); @@ -726,7 +726,7 @@ static int dib3000mc_set_frontend(struct dvb_frontend *fe) if (found == 0 || found == 1) return 0; // no channel found - dib3000mc_get_frontend(fe); + dib3000mc_get_frontend(fe, fep); } ret = dib3000mc_tune(fe); @@ -939,6 +939,6 @@ static struct dvb_frontend_ops dib3000mc_ops = { .read_ucblocks = dib3000mc_read_unc_blocks, }; -MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for the DiBcom 3000MC/P COFDM demodulator"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb-frontends/dib3000mc.h b/drivers/media/dvb-frontends/dib3000mc.h index 74816f793611..b37e69e6a58c 100644 --- a/drivers/media/dvb-frontends/dib3000mc.h +++ b/drivers/media/dvb-frontends/dib3000mc.h @@ -2,7 +2,7 @@ * Driver for DiBcom DiB3000MC/P-demodulator. * * Copyright (C) 2004-6 DiBcom (http://www.dibcom.fr/) - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher\@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * This code is partially based on the previous dib3000mc.c . 
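On the caller side the cache is now passed explicitly: code that used to call fe->ops.get_frontend(fe) hands over whatever properties structure it wants populated, usually the cache itself, as the dib9000 and stb6100 hunks further down do. Roughly:

static void foo_refresh_cache(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	if (fe->ops.get_frontend)
		fe->ops.get_frontend(fe, c);
}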
* diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c index 35eb71fe3c2b..b3ddae8885ac 100644 --- a/drivers/media/dvb-frontends/dib7000m.c +++ b/drivers/media/dvb-frontends/dib7000m.c @@ -1151,9 +1151,9 @@ static int dib7000m_identify(struct dib7000m_state *state) } -static int dib7000m_get_frontend(struct dvb_frontend* fe) +static int dib7000m_get_frontend(struct dvb_frontend* fe, + struct dtv_frontend_properties *fep) { - struct dtv_frontend_properties *fep = &fe->dtv_property_cache; struct dib7000m_state *state = fe->demodulator_priv; u16 tps = dib7000m_read_word(state,480); @@ -1246,7 +1246,7 @@ static int dib7000m_set_frontend(struct dvb_frontend *fe) if (found == 0 || found == 1) return 0; // no channel found - dib7000m_get_frontend(fe); + dib7000m_get_frontend(fe, fep); } ret = dib7000m_tune(fe); @@ -1465,6 +1465,6 @@ static struct dvb_frontend_ops dib7000m_ops = { .read_ucblocks = dib7000m_read_unc_blocks, }; -MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for the DiBcom 7000MA/MB/PA/PB/MC COFDM demodulator"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c index 33be5d6b9e10..b861d4437f2a 100644 --- a/drivers/media/dvb-frontends/dib7000p.c +++ b/drivers/media/dvb-frontends/dib7000p.c @@ -1405,9 +1405,9 @@ static int dib7000p_identify(struct dib7000p_state *st) return 0; } -static int dib7000p_get_frontend(struct dvb_frontend *fe) +static int dib7000p_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *fep) { - struct dtv_frontend_properties *fep = &fe->dtv_property_cache; struct dib7000p_state *state = fe->demodulator_priv; u16 tps = dib7000p_read_word(state, 463); @@ -1540,7 +1540,7 @@ static int dib7000p_set_frontend(struct dvb_frontend *fe) if (found == 0 || found == 1) return 0; - dib7000p_get_frontend(fe); + dib7000p_get_frontend(fe, fep); } ret = dib7000p_tune(fe); @@ -2834,7 +2834,7 @@ static struct dvb_frontend_ops dib7000p_ops = { .read_ucblocks = dib7000p_read_unc_blocks, }; -MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>"); -MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); +MODULE_AUTHOR("Olivier Grenie <olivie.grenie@parrot.com>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for the DiBcom 7000PC COFDM demodulator"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index 94c26270fff0..ddf9c44877a2 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -3382,14 +3382,15 @@ static int dib8000_sleep(struct dvb_frontend *fe) static int dib8000_read_status(struct dvb_frontend *fe, enum fe_status *stat); -static int dib8000_get_frontend(struct dvb_frontend *fe) +static int dib8000_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct dib8000_state *state = fe->demodulator_priv; u16 i, val = 0; enum fe_status stat = 0; u8 index_frontend, sub_index_frontend; - fe->dtv_property_cache.bandwidth_hz = 6000000; + c->bandwidth_hz = 6000000; /* * If called to early, get_frontend makes dib8000_tune to either @@ -3406,7 +3407,7 @@ static int dib8000_get_frontend(struct dvb_frontend *fe) if (stat&FE_HAS_SYNC) { dprintk("TMCC lock on the slave%i", index_frontend); /* synchronize the cache with the other frontends */ - 
state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend]); + state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], c); for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL); sub_index_frontend++) { if (sub_index_frontend != index_frontend) { state->fe[sub_index_frontend]->dtv_property_cache.isdbt_sb_mode = state->fe[index_frontend]->dtv_property_cache.isdbt_sb_mode; @@ -3426,57 +3427,57 @@ static int dib8000_get_frontend(struct dvb_frontend *fe) } } - fe->dtv_property_cache.isdbt_sb_mode = dib8000_read_word(state, 508) & 0x1; + c->isdbt_sb_mode = dib8000_read_word(state, 508) & 0x1; if (state->revision == 0x8090) val = dib8000_read_word(state, 572); else val = dib8000_read_word(state, 570); - fe->dtv_property_cache.inversion = (val & 0x40) >> 6; + c->inversion = (val & 0x40) >> 6; switch ((val & 0x30) >> 4) { case 1: - fe->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_2K; + c->transmission_mode = TRANSMISSION_MODE_2K; dprintk("dib8000_get_frontend: transmission mode 2K"); break; case 2: - fe->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_4K; + c->transmission_mode = TRANSMISSION_MODE_4K; dprintk("dib8000_get_frontend: transmission mode 4K"); break; case 3: default: - fe->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_8K; + c->transmission_mode = TRANSMISSION_MODE_8K; dprintk("dib8000_get_frontend: transmission mode 8K"); break; } switch (val & 0x3) { case 0: - fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_32; + c->guard_interval = GUARD_INTERVAL_1_32; dprintk("dib8000_get_frontend: Guard Interval = 1/32 "); break; case 1: - fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_16; + c->guard_interval = GUARD_INTERVAL_1_16; dprintk("dib8000_get_frontend: Guard Interval = 1/16 "); break; case 2: dprintk("dib8000_get_frontend: Guard Interval = 1/8 "); - fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_8; + c->guard_interval = GUARD_INTERVAL_1_8; break; case 3: dprintk("dib8000_get_frontend: Guard Interval = 1/4 "); - fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_4; + c->guard_interval = GUARD_INTERVAL_1_4; break; } val = dib8000_read_word(state, 505); - fe->dtv_property_cache.isdbt_partial_reception = val & 1; - dprintk("dib8000_get_frontend: partial_reception = %d ", fe->dtv_property_cache.isdbt_partial_reception); + c->isdbt_partial_reception = val & 1; + dprintk("dib8000_get_frontend: partial_reception = %d ", c->isdbt_partial_reception); for (i = 0; i < 3; i++) { int show; val = dib8000_read_word(state, 493 + i) & 0x0f; - fe->dtv_property_cache.layer[i].segment_count = val; + c->layer[i].segment_count = val; if (val == 0 || val > 13) show = 0; @@ -3485,41 +3486,41 @@ static int dib8000_get_frontend(struct dvb_frontend *fe) if (show) dprintk("dib8000_get_frontend: Layer %d segments = %d ", - i, fe->dtv_property_cache.layer[i].segment_count); + i, c->layer[i].segment_count); val = dib8000_read_word(state, 499 + i) & 0x3; /* Interleaving can be 0, 1, 2 or 4 */ if (val == 3) val = 4; - fe->dtv_property_cache.layer[i].interleaving = val; + c->layer[i].interleaving = val; if (show) dprintk("dib8000_get_frontend: Layer %d time_intlv = %d ", - i, fe->dtv_property_cache.layer[i].interleaving); + i, c->layer[i].interleaving); val = dib8000_read_word(state, 481 + i); switch (val & 0x7) { case 1: - fe->dtv_property_cache.layer[i].fec = FEC_1_2; + c->layer[i].fec = FEC_1_2; if (show) dprintk("dib8000_get_frontend: Layer %d Code Rate = 1/2 ", 
i); break; case 2: - fe->dtv_property_cache.layer[i].fec = FEC_2_3; + c->layer[i].fec = FEC_2_3; if (show) dprintk("dib8000_get_frontend: Layer %d Code Rate = 2/3 ", i); break; case 3: - fe->dtv_property_cache.layer[i].fec = FEC_3_4; + c->layer[i].fec = FEC_3_4; if (show) dprintk("dib8000_get_frontend: Layer %d Code Rate = 3/4 ", i); break; case 5: - fe->dtv_property_cache.layer[i].fec = FEC_5_6; + c->layer[i].fec = FEC_5_6; if (show) dprintk("dib8000_get_frontend: Layer %d Code Rate = 5/6 ", i); break; default: - fe->dtv_property_cache.layer[i].fec = FEC_7_8; + c->layer[i].fec = FEC_7_8; if (show) dprintk("dib8000_get_frontend: Layer %d Code Rate = 7/8 ", i); break; @@ -3528,23 +3529,23 @@ static int dib8000_get_frontend(struct dvb_frontend *fe) val = dib8000_read_word(state, 487 + i); switch (val & 0x3) { case 0: - fe->dtv_property_cache.layer[i].modulation = DQPSK; + c->layer[i].modulation = DQPSK; if (show) dprintk("dib8000_get_frontend: Layer %d DQPSK ", i); break; case 1: - fe->dtv_property_cache.layer[i].modulation = QPSK; + c->layer[i].modulation = QPSK; if (show) dprintk("dib8000_get_frontend: Layer %d QPSK ", i); break; case 2: - fe->dtv_property_cache.layer[i].modulation = QAM_16; + c->layer[i].modulation = QAM_16; if (show) dprintk("dib8000_get_frontend: Layer %d QAM16 ", i); break; case 3: default: - fe->dtv_property_cache.layer[i].modulation = QAM_64; + c->layer[i].modulation = QAM_64; if (show) dprintk("dib8000_get_frontend: Layer %d QAM64 ", i); break; @@ -3553,16 +3554,16 @@ static int dib8000_get_frontend(struct dvb_frontend *fe) /* synchronize the cache with the other frontends */ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { - state->fe[index_frontend]->dtv_property_cache.isdbt_sb_mode = fe->dtv_property_cache.isdbt_sb_mode; - state->fe[index_frontend]->dtv_property_cache.inversion = fe->dtv_property_cache.inversion; - state->fe[index_frontend]->dtv_property_cache.transmission_mode = fe->dtv_property_cache.transmission_mode; - state->fe[index_frontend]->dtv_property_cache.guard_interval = fe->dtv_property_cache.guard_interval; - state->fe[index_frontend]->dtv_property_cache.isdbt_partial_reception = fe->dtv_property_cache.isdbt_partial_reception; + state->fe[index_frontend]->dtv_property_cache.isdbt_sb_mode = c->isdbt_sb_mode; + state->fe[index_frontend]->dtv_property_cache.inversion = c->inversion; + state->fe[index_frontend]->dtv_property_cache.transmission_mode = c->transmission_mode; + state->fe[index_frontend]->dtv_property_cache.guard_interval = c->guard_interval; + state->fe[index_frontend]->dtv_property_cache.isdbt_partial_reception = c->isdbt_partial_reception; for (i = 0; i < 3; i++) { - state->fe[index_frontend]->dtv_property_cache.layer[i].segment_count = fe->dtv_property_cache.layer[i].segment_count; - state->fe[index_frontend]->dtv_property_cache.layer[i].interleaving = fe->dtv_property_cache.layer[i].interleaving; - state->fe[index_frontend]->dtv_property_cache.layer[i].fec = fe->dtv_property_cache.layer[i].fec; - state->fe[index_frontend]->dtv_property_cache.layer[i].modulation = fe->dtv_property_cache.layer[i].modulation; + state->fe[index_frontend]->dtv_property_cache.layer[i].segment_count = c->layer[i].segment_count; + state->fe[index_frontend]->dtv_property_cache.layer[i].interleaving = c->layer[i].interleaving; + state->fe[index_frontend]->dtv_property_cache.layer[i].fec = c->layer[i].fec; + state->fe[index_frontend]->dtv_property_cache.layer[i].modulation = 
c->layer[i].modulation; } } return 0; @@ -3671,7 +3672,7 @@ static int dib8000_set_frontend(struct dvb_frontend *fe) if (state->channel_parameters_set == 0) { /* searching */ if ((dib8000_get_status(state->fe[index_frontend]) == FE_STATUS_DEMOD_SUCCESS) || (dib8000_get_status(state->fe[index_frontend]) == FE_STATUS_FFT_SUCCESS)) { dprintk("autosearch succeeded on fe%i", index_frontend); - dib8000_get_frontend(state->fe[index_frontend]); /* we read the channel parameters from the frontend which was successful */ + dib8000_get_frontend(state->fe[index_frontend], c); /* we read the channel parameters from the frontend which was successful */ state->channel_parameters_set = 1; for (l = 0; (l < MAX_NUMBER_OF_FRONTENDS) && (state->fe[l] != NULL); l++) { @@ -4516,6 +4517,6 @@ void *dib8000_attach(struct dib8000_ops *ops) } EXPORT_SYMBOL(dib8000_attach); -MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@dibcom.fr, " "Patrick Boettcher <pboettcher@dibcom.fr>"); +MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@parrot.com, Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for the DiBcom 8000 ISDB-T demodulator"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c index 8f92aca0b073..bab8c5a980a2 100644 --- a/drivers/media/dvb-frontends/dib9000.c +++ b/drivers/media/dvb-frontends/dib9000.c @@ -1889,7 +1889,8 @@ static int dib9000_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_fron return 0; } -static int dib9000_get_frontend(struct dvb_frontend *fe) +static int dib9000_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct dib9000_state *state = fe->demodulator_priv; u8 index_frontend, sub_index_frontend; @@ -1909,7 +1910,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe) dprintk("TPS lock on the slave%i", index_frontend); /* synchronize the cache with the other frontends */ - state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend]); + state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], c); for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL); sub_index_frontend++) { if (sub_index_frontend != index_frontend) { @@ -1943,14 +1944,14 @@ static int dib9000_get_frontend(struct dvb_frontend *fe) /* synchronize the cache with the other frontends */ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { - state->fe[index_frontend]->dtv_property_cache.inversion = fe->dtv_property_cache.inversion; - state->fe[index_frontend]->dtv_property_cache.transmission_mode = fe->dtv_property_cache.transmission_mode; - state->fe[index_frontend]->dtv_property_cache.guard_interval = fe->dtv_property_cache.guard_interval; - state->fe[index_frontend]->dtv_property_cache.modulation = fe->dtv_property_cache.modulation; - state->fe[index_frontend]->dtv_property_cache.hierarchy = fe->dtv_property_cache.hierarchy; - state->fe[index_frontend]->dtv_property_cache.code_rate_HP = fe->dtv_property_cache.code_rate_HP; - state->fe[index_frontend]->dtv_property_cache.code_rate_LP = fe->dtv_property_cache.code_rate_LP; - state->fe[index_frontend]->dtv_property_cache.rolloff = fe->dtv_property_cache.rolloff; + state->fe[index_frontend]->dtv_property_cache.inversion = c->inversion; + state->fe[index_frontend]->dtv_property_cache.transmission_mode = c->transmission_mode; + state->fe[index_frontend]->dtv_property_cache.guard_interval = 
c->guard_interval; + state->fe[index_frontend]->dtv_property_cache.modulation = c->modulation; + state->fe[index_frontend]->dtv_property_cache.hierarchy = c->hierarchy; + state->fe[index_frontend]->dtv_property_cache.code_rate_HP = c->code_rate_HP; + state->fe[index_frontend]->dtv_property_cache.code_rate_LP = c->code_rate_LP; + state->fe[index_frontend]->dtv_property_cache.rolloff = c->rolloff; } ret = 0; @@ -2083,7 +2084,7 @@ static int dib9000_set_frontend(struct dvb_frontend *fe) /* synchronize all the channel cache */ state->get_frontend_internal = 1; - dib9000_get_frontend(state->fe[0]); + dib9000_get_frontend(state->fe[0], &state->fe[0]->dtv_property_cache); state->get_frontend_internal = 0; /* retune the other frontends with the found channel */ @@ -2589,7 +2590,7 @@ static struct dvb_frontend_ops dib9000_ops = { .read_ucblocks = dib9000_read_unc_blocks, }; -MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); -MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); +MODULE_AUTHOR("Olivier Grenie <olivier.grenie@parrot.com>"); MODULE_DESCRIPTION("Driver for the DiBcom 9000 COFDM demodulator"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb-frontends/dibx000_common.c b/drivers/media/dvb-frontends/dibx000_common.c index 43be7238311e..723358d7ca84 100644 --- a/drivers/media/dvb-frontends/dibx000_common.c +++ b/drivers/media/dvb-frontends/dibx000_common.c @@ -510,6 +510,6 @@ u32 systime(void) } EXPORT_SYMBOL(systime); -MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Common function the DiBcom demodulator family"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.c b/drivers/media/dvb-frontends/dvb_dummy_fe.c index 14e996d45fac..e5bd8c62ad3a 100644 --- a/drivers/media/dvb-frontends/dvb_dummy_fe.c +++ b/drivers/media/dvb-frontends/dvb_dummy_fe.c @@ -70,9 +70,12 @@ static int dvb_dummy_fe_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) } /* - * Only needed if it actually reads something from the hardware + * Should only be implemented if it actually reads something from the hardware. + * Also, it should check for the locks, in order to avoid report wrong data + * to userspace. 
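The reworded dvb_dummy_fe comment spells out the expectation for real hardware: implement get_frontend only when the demod can actually be read back, and report values only once the frontend has a lock, so that stale or random register contents never reach userspace. A hedged sketch of that guard, with foo_read_status() and foo_state standing in for a real driver's helpers:

static int foo_get_frontend(struct dvb_frontend *fe,
			    struct dtv_frontend_properties *p)
{
	struct foo_state *state = fe->demodulator_priv;
	enum fe_status status = 0;
	int ret;

	ret = foo_read_status(fe, &status);
	if (ret)
		return ret;

	/* not locked yet: the demod registers would yield garbage */
	if (!(status & FE_HAS_LOCK))
		return 0;

	p->frequency = state->tuned_frequency;
	p->symbol_rate = state->symbol_rate;

	return 0;
}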
*/ -static int dvb_dummy_fe_get_frontend(struct dvb_frontend *fe) +static int dvb_dummy_fe_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { return 0; } diff --git a/drivers/media/dvb-frontends/hd29l2.c b/drivers/media/dvb-frontends/hd29l2.c index 40e359f2d17d..1c7eb477e2cd 100644 --- a/drivers/media/dvb-frontends/hd29l2.c +++ b/drivers/media/dvb-frontends/hd29l2.c @@ -560,11 +560,11 @@ static int hd29l2_get_frontend_algo(struct dvb_frontend *fe) return DVBFE_ALGO_CUSTOM; } -static int hd29l2_get_frontend(struct dvb_frontend *fe) +static int hd29l2_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { int ret; struct hd29l2_priv *priv = fe->demodulator_priv; - struct dtv_frontend_properties *c = &fe->dtv_property_cache; u8 buf[3]; u32 if_ctl; char *str_constellation, *str_code_rate, *str_constellation_code_rate, diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c index 0977871232a2..2f3d0519e19b 100644 --- a/drivers/media/dvb-frontends/l64781.c +++ b/drivers/media/dvb-frontends/l64781.c @@ -243,9 +243,9 @@ static int apply_frontend_param(struct dvb_frontend *fe) return 0; } -static int get_frontend(struct dvb_frontend *fe) +static int get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct l64781_state* state = fe->demodulator_priv; int tmp; diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c index 7880f71ccd8a..f51a3a0b3949 100644 --- a/drivers/media/dvb-frontends/lg2160.c +++ b/drivers/media/dvb-frontends/lg2160.c @@ -942,101 +942,102 @@ static int lg216x_read_rs_err_count(struct lg216x_state *state, u16 *err) /* ------------------------------------------------------------------------ */ -static int lg216x_get_frontend(struct dvb_frontend *fe) +static int lg216x_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct lg216x_state *state = fe->demodulator_priv; int ret; lg_dbg("\n"); - fe->dtv_property_cache.modulation = VSB_8; - fe->dtv_property_cache.frequency = state->current_frequency; - fe->dtv_property_cache.delivery_system = SYS_ATSCMH; + c->modulation = VSB_8; + c->frequency = state->current_frequency; + c->delivery_system = SYS_ATSCMH; ret = lg216x_get_fic_version(state, - &fe->dtv_property_cache.atscmh_fic_ver); + &c->atscmh_fic_ver); if (lg_fail(ret)) goto fail; - if (state->fic_ver != fe->dtv_property_cache.atscmh_fic_ver) { - state->fic_ver = fe->dtv_property_cache.atscmh_fic_ver; + if (state->fic_ver != c->atscmh_fic_ver) { + state->fic_ver = c->atscmh_fic_ver; #if 0 ret = lg2160_get_parade_id(state, - &fe->dtv_property_cache.atscmh_parade_id); + &c->atscmh_parade_id); if (lg_fail(ret)) goto fail; /* #else */ - fe->dtv_property_cache.atscmh_parade_id = state->parade_id; + c->atscmh_parade_id = state->parade_id; #endif ret = lg216x_get_nog(state, - &fe->dtv_property_cache.atscmh_nog); + &c->atscmh_nog); if (lg_fail(ret)) goto fail; ret = lg216x_get_tnog(state, - &fe->dtv_property_cache.atscmh_tnog); + &c->atscmh_tnog); if (lg_fail(ret)) goto fail; ret = lg216x_get_sgn(state, - &fe->dtv_property_cache.atscmh_sgn); + &c->atscmh_sgn); if (lg_fail(ret)) goto fail; ret = lg216x_get_prc(state, - &fe->dtv_property_cache.atscmh_prc); + &c->atscmh_prc); if (lg_fail(ret)) goto fail; ret = lg216x_get_rs_frame_mode(state, (enum atscmh_rs_frame_mode *) - &fe->dtv_property_cache.atscmh_rs_frame_mode); + &c->atscmh_rs_frame_mode); if (lg_fail(ret)) 
goto fail; ret = lg216x_get_rs_frame_ensemble(state, (enum atscmh_rs_frame_ensemble *) - &fe->dtv_property_cache.atscmh_rs_frame_ensemble); + &c->atscmh_rs_frame_ensemble); if (lg_fail(ret)) goto fail; ret = lg216x_get_rs_code_mode(state, (enum atscmh_rs_code_mode *) - &fe->dtv_property_cache.atscmh_rs_code_mode_pri, + &c->atscmh_rs_code_mode_pri, (enum atscmh_rs_code_mode *) - &fe->dtv_property_cache.atscmh_rs_code_mode_sec); + &c->atscmh_rs_code_mode_sec); if (lg_fail(ret)) goto fail; ret = lg216x_get_sccc_block_mode(state, (enum atscmh_sccc_block_mode *) - &fe->dtv_property_cache.atscmh_sccc_block_mode); + &c->atscmh_sccc_block_mode); if (lg_fail(ret)) goto fail; ret = lg216x_get_sccc_code_mode(state, (enum atscmh_sccc_code_mode *) - &fe->dtv_property_cache.atscmh_sccc_code_mode_a, + &c->atscmh_sccc_code_mode_a, (enum atscmh_sccc_code_mode *) - &fe->dtv_property_cache.atscmh_sccc_code_mode_b, + &c->atscmh_sccc_code_mode_b, (enum atscmh_sccc_code_mode *) - &fe->dtv_property_cache.atscmh_sccc_code_mode_c, + &c->atscmh_sccc_code_mode_c, (enum atscmh_sccc_code_mode *) - &fe->dtv_property_cache.atscmh_sccc_code_mode_d); + &c->atscmh_sccc_code_mode_d); if (lg_fail(ret)) goto fail; } #if 0 ret = lg216x_read_fic_err_count(state, - (u8 *)&fe->dtv_property_cache.atscmh_fic_err); + (u8 *)&c->atscmh_fic_err); if (lg_fail(ret)) goto fail; ret = lg216x_read_crc_err_count(state, - &fe->dtv_property_cache.atscmh_crc_err); + &c->atscmh_crc_err); if (lg_fail(ret)) goto fail; ret = lg216x_read_rs_err_count(state, - &fe->dtv_property_cache.atscmh_rs_err); + &c->atscmh_rs_err); if (lg_fail(ret)) goto fail; switch (state->cfg->lg_chip) { case LG2160: - if (((fe->dtv_property_cache.atscmh_rs_err >= 240) && - (fe->dtv_property_cache.atscmh_crc_err >= 240)) && + if (((c->atscmh_rs_err >= 240) && + (c->atscmh_crc_err >= 240)) && ((jiffies_to_msecs(jiffies) - state->last_reset) > 6000)) ret = lg216x_soft_reset(state); break; @@ -1054,14 +1055,17 @@ fail: static int lg216x_get_property(struct dvb_frontend *fe, struct dtv_property *tvp) { + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + return (DTV_ATSCMH_FIC_VER == tvp->cmd) ? 
- lg216x_get_frontend(fe) : 0; + lg216x_get_frontend(fe, c) : 0; } static int lg2160_set_frontend(struct dvb_frontend *fe) { struct lg216x_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret; lg_dbg("(%d)\n", fe->dtv_property_cache.frequency); @@ -1129,7 +1133,7 @@ static int lg2160_set_frontend(struct dvb_frontend *fe) ret = lg216x_enable_fic(state, 1); lg_fail(ret); - lg216x_get_frontend(fe); + lg216x_get_frontend(fe, c); fail: return ret; } diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c index 47121866163d..4503e8852fd1 100644 --- a/drivers/media/dvb-frontends/lgdt3305.c +++ b/drivers/media/dvb-frontends/lgdt3305.c @@ -812,9 +812,9 @@ fail: return ret; } -static int lgdt3305_get_frontend(struct dvb_frontend *fe) +static int lgdt3305_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct lgdt3305_state *state = fe->demodulator_priv; lg_dbg("\n"); diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c index 721fbc07e9ee..179c26e5eb4e 100644 --- a/drivers/media/dvb-frontends/lgdt3306a.c +++ b/drivers/media/dvb-frontends/lgdt3306a.c @@ -1040,10 +1040,10 @@ fail: return ret; } -static int lgdt3306a_get_frontend(struct dvb_frontend *fe) +static int lgdt3306a_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { struct lgdt3306a_state *state = fe->demodulator_priv; - struct dtv_frontend_properties *p = &fe->dtv_property_cache; dbg_info("(%u, %d)\n", state->current_frequency, state->current_modulation); diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c index cf3cc20510da..96bf254da21e 100644 --- a/drivers/media/dvb-frontends/lgdt330x.c +++ b/drivers/media/dvb-frontends/lgdt330x.c @@ -439,10 +439,11 @@ static int lgdt330x_set_parameters(struct dvb_frontend *fe) return 0; } -static int lgdt330x_get_frontend(struct dvb_frontend *fe) +static int lgdt330x_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct lgdt330x_state *state = fe->demodulator_priv; + p->frequency = state->current_frequency; return 0; } diff --git a/drivers/media/dvb-frontends/lgs8gl5.c b/drivers/media/dvb-frontends/lgs8gl5.c index 7bbb2c18c2dd..fbfd87b5b803 100644 --- a/drivers/media/dvb-frontends/lgs8gl5.c +++ b/drivers/media/dvb-frontends/lgs8gl5.c @@ -336,10 +336,11 @@ lgs8gl5_set_frontend(struct dvb_frontend *fe) static int -lgs8gl5_get_frontend(struct dvb_frontend *fe) +lgs8gl5_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct lgs8gl5_state *state = fe->demodulator_priv; + u8 inv = lgs8gl5_read_reg(state, REG_INVERSION); p->inversion = (inv & REG_INVERSION_ON) ? 
INVERSION_ON : INVERSION_OFF; diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c index e2c191c8b196..919daeb96747 100644 --- a/drivers/media/dvb-frontends/lgs8gxx.c +++ b/drivers/media/dvb-frontends/lgs8gxx.c @@ -672,7 +672,7 @@ static int lgs8gxx_write(struct dvb_frontend *fe, const u8 buf[], int len) static int lgs8gxx_set_fe(struct dvb_frontend *fe) { - + struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache; struct lgs8gxx_state *priv = fe->demodulator_priv; dprintk("%s\n", __func__); @@ -689,17 +689,7 @@ static int lgs8gxx_set_fe(struct dvb_frontend *fe) msleep(10); - return 0; -} - -static int lgs8gxx_get_fe(struct dvb_frontend *fe) -{ - struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache; - dprintk("%s\n", __func__); - /* TODO: get real readings from device */ - /* inversion status */ - fe_params->inversion = INVERSION_OFF; /* bandwidth */ fe_params->bandwidth_hz = 8000000; @@ -1016,7 +1006,6 @@ static struct dvb_frontend_ops lgs8gxx_ops = { .i2c_gate_ctrl = lgs8gxx_i2c_gate_ctrl, .set_frontend = lgs8gxx_set_fe, - .get_frontend = lgs8gxx_get_fe, .get_tune_settings = lgs8gxx_get_tune_settings, .read_status = lgs8gxx_read_status, diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index ce73a5ec6036..76883600ec6f 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@ -791,11 +791,11 @@ err: return ret; } -static int m88ds3103_get_frontend(struct dvb_frontend *fe) +static int m88ds3103_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct m88ds3103_dev *dev = fe->demodulator_priv; struct i2c_client *client = dev->client; - struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret; u8 buf[3]; diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c index 9b6f464c48bd..a09b12313a73 100644 --- a/drivers/media/dvb-frontends/m88rs2000.c +++ b/drivers/media/dvb-frontends/m88rs2000.c @@ -708,10 +708,11 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe) return 0; } -static int m88rs2000_get_frontend(struct dvb_frontend *fe) +static int m88rs2000_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { - struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct m88rs2000_state *state = fe->demodulator_priv; + c->fec_inner = state->fec_inner; c->frequency = state->tuner_frequency; c->symbol_rate = state->symbol_rate; diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c index cfc005ee11d8..fb88dddaf3a3 100644 --- a/drivers/media/dvb-frontends/mb86a20s.c +++ b/drivers/media/dvb-frontends/mb86a20s.c @@ -2028,16 +2028,6 @@ static int mb86a20s_read_signal_strength_from_cache(struct dvb_frontend *fe, return 0; } -static int mb86a20s_get_frontend_dummy(struct dvb_frontend *fe) -{ - /* - * get_frontend is now handled together with other stats - * retrival, when read_status() is called, as some statistics - * will depend on the layers detection. 
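lgs8gxx and mb86a20s go in the other direction and drop .get_frontend altogether: when the property cache is already kept current elsewhere (here from the read_status()/statistics path), the op can be left unset and the DVB core falls back to answering from the cached values. An ops table without it would look roughly like this; all foo_* names are placeholders:

static struct dvb_frontend_ops foo_ops = {
	.delsys = { SYS_ISDBT },
	.info = {
		.name = "hypothetical ISDB-T demodulator",
	},
	.set_frontend = foo_set_frontend,
	/* no .get_frontend: the core serves the dtv property cache,
	 * which read_status() keeps up to date */
	.read_status = foo_read_status,
};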
- */ - return 0; -}; - static int mb86a20s_tune(struct dvb_frontend *fe, bool re_tune, unsigned int mode_flags, @@ -2136,7 +2126,6 @@ static struct dvb_frontend_ops mb86a20s_ops = { .init = mb86a20s_initfe, .set_frontend = mb86a20s_set_frontend, - .get_frontend = mb86a20s_get_frontend_dummy, .read_status = mb86a20s_read_status_and_stats, .read_signal_strength = mb86a20s_read_signal_strength_from_cache, .tune = mb86a20s_tune, diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c index c36e6764eead..fc08429c99b7 100644 --- a/drivers/media/dvb-frontends/mt312.c +++ b/drivers/media/dvb-frontends/mt312.c @@ -647,9 +647,9 @@ static int mt312_set_frontend(struct dvb_frontend *fe) return 0; } -static int mt312_get_frontend(struct dvb_frontend *fe) +static int mt312_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct mt312_state *state = fe->demodulator_priv; int ret; diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c index 123bb2f8e4b6..c0bb6328956b 100644 --- a/drivers/media/dvb-frontends/mt352.c +++ b/drivers/media/dvb-frontends/mt352.c @@ -311,9 +311,9 @@ static int mt352_set_parameters(struct dvb_frontend *fe) return 0; } -static int mt352_get_parameters(struct dvb_frontend* fe) +static int mt352_get_parameters(struct dvb_frontend* fe, + struct dtv_frontend_properties *op) { - struct dtv_frontend_properties *op = &fe->dtv_property_cache; struct mt352_state* state = fe->demodulator_priv; u16 tps; u16 div; diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c index 35b1053b3640..a165af990672 100644 --- a/drivers/media/dvb-frontends/or51132.c +++ b/drivers/media/dvb-frontends/or51132.c @@ -375,9 +375,9 @@ static int or51132_set_parameters(struct dvb_frontend *fe) return 0; } -static int or51132_get_parameters(struct dvb_frontend* fe) +static int or51132_get_parameters(struct dvb_frontend* fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct or51132_state* state = fe->demodulator_priv; int status; int retry = 1; diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c index b792f305cf15..3f96429af0e5 100644 --- a/drivers/media/dvb-frontends/rtl2830.c +++ b/drivers/media/dvb-frontends/rtl2830.c @@ -279,11 +279,11 @@ err: return ret; } -static int rtl2830_get_frontend(struct dvb_frontend *fe) +static int rtl2830_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct i2c_client *client = fe->demodulator_priv; struct rtl2830_dev *dev = i2c_get_clientdata(client); - struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret; u8 buf[3]; @@ -900,6 +900,9 @@ static int rtl2830_remove(struct i2c_client *client) dev_dbg(&client->dev, "\n"); + /* stop statistics polling */ + cancel_delayed_work_sync(&dev->stat_work); + i2c_del_mux_adapter(dev->adapter); regmap_exit(dev->regmap); kfree(dev); diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c index 10f2119935da..c2469fb42f12 100644 --- a/drivers/media/dvb-frontends/rtl2832.c +++ b/drivers/media/dvb-frontends/rtl2832.c @@ -575,11 +575,11 @@ err: return ret; } -static int rtl2832_get_frontend(struct dvb_frontend *fe) +static int rtl2832_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct rtl2832_dev *dev = fe->demodulator_priv; struct i2c_client *client = 
dev->client; - struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret; u8 buf[3]; diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c index 10964848a2f1..c68965ad97c0 100644 --- a/drivers/media/dvb-frontends/s5h1409.c +++ b/drivers/media/dvb-frontends/s5h1409.c @@ -925,9 +925,9 @@ static int s5h1409_read_ber(struct dvb_frontend *fe, u32 *ber) return s5h1409_read_ucblocks(fe, ber); } -static int s5h1409_get_frontend(struct dvb_frontend *fe) +static int s5h1409_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct s5h1409_state *state = fe->demodulator_priv; p->frequency = state->current_frequency; diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c index 9afc3f42290e..90f86e82b087 100644 --- a/drivers/media/dvb-frontends/s5h1411.c +++ b/drivers/media/dvb-frontends/s5h1411.c @@ -840,9 +840,9 @@ static int s5h1411_read_ber(struct dvb_frontend *fe, u32 *ber) return s5h1411_read_ucblocks(fe, ber); } -static int s5h1411_get_frontend(struct dvb_frontend *fe) +static int s5h1411_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct s5h1411_state *state = fe->demodulator_priv; p->frequency = state->current_frequency; diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c index 9c22a4c70d87..d7d0b7d57ad7 100644 --- a/drivers/media/dvb-frontends/s5h1420.c +++ b/drivers/media/dvb-frontends/s5h1420.c @@ -756,9 +756,9 @@ static int s5h1420_set_frontend(struct dvb_frontend *fe) return 0; } -static int s5h1420_get_frontend(struct dvb_frontend* fe) +static int s5h1420_get_frontend(struct dvb_frontend* fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct s5h1420_state* state = fe->demodulator_priv; p->frequency = state->tunedfreq + s5h1420_getfreqoffset(state); diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c index d6a8fa63040b..b5e3d90eba5e 100644 --- a/drivers/media/dvb-frontends/s921.c +++ b/drivers/media/dvb-frontends/s921.c @@ -433,9 +433,9 @@ static int s921_set_frontend(struct dvb_frontend *fe) return 0; } -static int s921_get_frontend(struct dvb_frontend *fe) +static int s921_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct s921_state *state = fe->demodulator_priv; /* FIXME: Probably it is possible to get it from regs f1 and f2 */ diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c index 2b93241d4bc1..8bf716a8ea58 100644 --- a/drivers/media/dvb-frontends/si2165.c +++ b/drivers/media/dvb-frontends/si2165.c @@ -225,22 +225,18 @@ static int si2165_writereg32(struct si2165_state *state, const u16 reg, u32 val) static int si2165_writereg_mask8(struct si2165_state *state, const u16 reg, u8 val, u8 mask) { - int ret; - u8 tmp; - if (mask != 0xff) { - ret = si2165_readreg8(state, reg, &tmp); + u8 tmp; + int ret = si2165_readreg8(state, reg, &tmp); + if (ret < 0) - goto err; + return ret; val &= mask; tmp &= ~mask; val |= tmp; } - - ret = si2165_writereg8(state, reg, val); -err: - return ret; + return si2165_writereg8(state, reg, val); } #define REG16(reg, val) { (reg), (val) & 0xff }, { (reg)+1, (val)>>8 & 0xff } @@ -825,19 +821,19 @@ static int 
si2165_set_frontend_dvbt(struct dvb_frontend *fe) struct si2165_state *state = fe->demodulator_priv; u32 dvb_rate = 0; u16 bw10k; + u32 bw_hz = p->bandwidth_hz; dprintk("%s: called\n", __func__); if (!state->has_dvbt) return -EINVAL; - if (p->bandwidth_hz > 0) { - dvb_rate = p->bandwidth_hz * 8 / 7; - bw10k = p->bandwidth_hz / 10000; - } else { - dvb_rate = 8 * 8 / 7; - bw10k = 800; - } + /* no bandwidth auto-detection */ + if (bw_hz == 0) + return -EINVAL; + + dvb_rate = bw_hz * 8 / 7; + bw10k = bw_hz / 10000; ret = si2165_adjust_pll_divl(state, 12); if (ret < 0) diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c index 756650f154ab..3d171b0e00c2 100644 --- a/drivers/media/dvb-frontends/stb0899_drv.c +++ b/drivers/media/dvb-frontends/stb0899_drv.c @@ -1568,9 +1568,9 @@ static enum dvbfe_search stb0899_search(struct dvb_frontend *fe) return DVBFE_ALGO_SEARCH_ERROR; } -static int stb0899_get_frontend(struct dvb_frontend *fe) +static int stb0899_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct stb0899_state *state = fe->demodulator_priv; struct stb0899_internal *internal = &state->internal; diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c index c978c801c7aa..b9c2511bf019 100644 --- a/drivers/media/dvb-frontends/stb6100.c +++ b/drivers/media/dvb-frontends/stb6100.c @@ -346,7 +346,7 @@ static int stb6100_set_frequency(struct dvb_frontend *fe, u32 frequency) if (fe->ops.get_frontend) { dprintk(verbose, FE_DEBUG, 1, "Get frontend parameters"); - fe->ops.get_frontend(fe); + fe->ops.get_frontend(fe, p); } srate = p->symbol_rate; diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c index 75b4d8b25657..81b27b7c0c96 100644 --- a/drivers/media/dvb-frontends/stv0297.c +++ b/drivers/media/dvb-frontends/stv0297.c @@ -615,9 +615,9 @@ timeout: return 0; } -static int stv0297_get_frontend(struct dvb_frontend *fe) +static int stv0297_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct stv0297_state *state = fe->demodulator_priv; int reg_00, reg_83; diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c index a8177807fb65..7927fa925f2f 100644 --- a/drivers/media/dvb-frontends/stv0299.c +++ b/drivers/media/dvb-frontends/stv0299.c @@ -422,7 +422,7 @@ static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long if (debug_legacy_dish_switch) printk ("%s switch command: 0x%04lx\n",__func__, cmd); - nexttime = ktime_get_real(); + nexttime = ktime_get_boottime(); if (debug_legacy_dish_switch) tv[0] = nexttime; stv0299_writeregI (state, 0x0c, reg0x0c | 0x50); /* set LNB to 18V */ @@ -431,7 +431,7 @@ static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long for (i=0; i<9; i++) { if (debug_legacy_dish_switch) - tv[i+1] = ktime_get_real(); + tv[i+1] = ktime_get_boottime(); if((cmd & 0x01) != last) { /* set voltage to (last ? 13V : 18V) */ stv0299_writeregI (state, 0x0c, reg0x0c | (last ? 
lv_mask : 0x50)); @@ -602,9 +602,9 @@ static int stv0299_set_frontend(struct dvb_frontend *fe) return 0; } -static int stv0299_get_frontend(struct dvb_frontend *fe) +static int stv0299_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct stv0299_state* state = fe->demodulator_priv; s32 derot_freq; int invval; diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index 44cb73f68af6..abc379aea713 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c @@ -1938,9 +1938,9 @@ static int stv0367ter_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) return 0; } -static int stv0367ter_get_frontend(struct dvb_frontend *fe) +static int stv0367ter_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct stv0367_state *state = fe->demodulator_priv; struct stv0367ter_state *ter_state = state->ter_state; enum stv0367_ter_mode mode; @@ -3146,9 +3146,9 @@ static int stv0367cab_set_frontend(struct dvb_frontend *fe) return 0; } -static int stv0367cab_get_frontend(struct dvb_frontend *fe) +static int stv0367cab_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct stv0367_state *state = fe->demodulator_priv; struct stv0367cab_state *cab_state = state->cab_state; diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c index fe31dd541955..28239b1fd954 100644 --- a/drivers/media/dvb-frontends/stv0900_core.c +++ b/drivers/media/dvb-frontends/stv0900_core.c @@ -1859,9 +1859,9 @@ static int stv0900_sleep(struct dvb_frontend *fe) return 0; } -static int stv0900_get_frontend(struct dvb_frontend *fe) +static int stv0900_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct stv0900_state *state = fe->demodulator_priv; struct stv0900_internal *intp = state->internal; enum fe_stv0900_demod_num demod = state->demod; diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c index e66154e5c1d7..a62c01e454f5 100644 --- a/drivers/media/dvb-frontends/stv6110x.c +++ b/drivers/media/dvb-frontends/stv6110x.c @@ -355,7 +355,7 @@ static struct dvb_tuner_ops stv6110x_ops = { .release = stv6110x_release }; -static struct stv6110x_devctl stv6110x_ctl = { +static const struct stv6110x_devctl stv6110x_ctl = { .tuner_init = stv6110x_init, .tuner_sleep = stv6110x_sleep, .tuner_set_mode = stv6110x_set_mode, @@ -369,7 +369,7 @@ static struct stv6110x_devctl stv6110x_ctl = { .tuner_get_status = stv6110x_get_status, }; -struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe, +const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe, const struct stv6110x_config *config, struct i2c_adapter *i2c) { diff --git a/drivers/media/dvb-frontends/stv6110x.h b/drivers/media/dvb-frontends/stv6110x.h index 9f7eb251aec3..696b6e5b9e7b 100644 --- a/drivers/media/dvb-frontends/stv6110x.h +++ b/drivers/media/dvb-frontends/stv6110x.h @@ -55,12 +55,12 @@ struct stv6110x_devctl { #if IS_REACHABLE(CONFIG_DVB_STV6110x) -extern struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe, +extern const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe, const struct stv6110x_config *config, struct i2c_adapter 
*i2c); #else -static inline struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe, +static inline const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe, const struct stv6110x_config *config, struct i2c_adapter *i2c) { diff --git a/drivers/media/dvb-frontends/stv6110x_priv.h b/drivers/media/dvb-frontends/stv6110x_priv.h index 0ec936a660a7..a993aba27b7e 100644 --- a/drivers/media/dvb-frontends/stv6110x_priv.h +++ b/drivers/media/dvb-frontends/stv6110x_priv.h @@ -70,7 +70,7 @@ struct stv6110x_state { const struct stv6110x_config *config; u8 regs[8]; - struct stv6110x_devctl *devctl; + const struct stv6110x_devctl *devctl; }; #endif /* __STV6110x_PRIV_H */ diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c index 456cdc7fb1e7..31cd32532387 100644 --- a/drivers/media/dvb-frontends/tc90522.c +++ b/drivers/media/dvb-frontends/tc90522.c @@ -201,10 +201,10 @@ static const enum fe_code_rate fec_conv_sat[] = { FEC_2_3, /* for 8PSK. (trellis code) */ }; -static int tc90522s_get_frontend(struct dvb_frontend *fe) +static int tc90522s_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct tc90522_state *state; - struct dtv_frontend_properties *c; struct dtv_fe_stats *stats; int ret, i; int layers; @@ -212,7 +212,6 @@ static int tc90522s_get_frontend(struct dvb_frontend *fe) u32 cndat; state = fe->demodulator_priv; - c = &fe->dtv_property_cache; c->delivery_system = SYS_ISDBS; c->symbol_rate = 28860000; @@ -337,10 +336,10 @@ static const enum fe_modulation mod_conv[] = { DQPSK, QPSK, QAM_16, QAM_64, 0, 0, 0, 0 }; -static int tc90522t_get_frontend(struct dvb_frontend *fe) +static int tc90522t_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct tc90522_state *state; - struct dtv_frontend_properties *c; struct dtv_fe_stats *stats; int ret, i; int layers; @@ -348,7 +347,6 @@ static int tc90522t_get_frontend(struct dvb_frontend *fe) u32 cndat; state = fe->demodulator_priv; - c = &fe->dtv_property_cache; c->delivery_system = SYS_ISDBT; c->bandwidth_hz = 6000000; mode = 1; diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c index a684424e665a..806c56691ca5 100644 --- a/drivers/media/dvb-frontends/tda10021.c +++ b/drivers/media/dvb-frontends/tda10021.c @@ -387,9 +387,9 @@ static int tda10021_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) return 0; } -static int tda10021_get_frontend(struct dvb_frontend *fe) +static int tda10021_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct tda10021_state* state = fe->demodulator_priv; int sync; s8 afc = 0; diff --git a/drivers/media/dvb-frontends/tda10023.c b/drivers/media/dvb-frontends/tda10023.c index 44a55656093f..3b8c7e499d0d 100644 --- a/drivers/media/dvb-frontends/tda10023.c +++ b/drivers/media/dvb-frontends/tda10023.c @@ -457,9 +457,9 @@ static int tda10023_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) return 0; } -static int tda10023_get_frontend(struct dvb_frontend *fe) +static int tda10023_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct tda10023_state* state = fe->demodulator_priv; int sync,inv; s8 afc = 0; diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c index 8451086c563f..c2bf89d0b0b0 100644 --- a/drivers/media/dvb-frontends/tda10048.c +++ 
b/drivers/media/dvb-frontends/tda10048.c @@ -1028,9 +1028,9 @@ static int tda10048_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) return 0; } -static int tda10048_get_frontend(struct dvb_frontend *fe) +static int tda10048_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct tda10048_state *state = fe->demodulator_priv; dprintk(1, "%s()\n", __func__); diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c index c6abeb4fba9d..b89848313fb9 100644 --- a/drivers/media/dvb-frontends/tda1004x.c +++ b/drivers/media/dvb-frontends/tda1004x.c @@ -899,9 +899,9 @@ static int tda1004x_set_fe(struct dvb_frontend *fe) return 0; } -static int tda1004x_get_fe(struct dvb_frontend *fe) +static int tda1004x_get_fe(struct dvb_frontend *fe, + struct dtv_frontend_properties *fe_params) { - struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache; struct tda1004x_state* state = fe->demodulator_priv; int status; diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c index 119d47596ac8..37ebeef2bbd0 100644 --- a/drivers/media/dvb-frontends/tda10071.c +++ b/drivers/media/dvb-frontends/tda10071.c @@ -701,11 +701,11 @@ error: return ret; } -static int tda10071_get_frontend(struct dvb_frontend *fe) +static int tda10071_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { struct tda10071_dev *dev = fe->demodulator_priv; struct i2c_client *client = dev->client; - struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret, i; u8 buf[5], tmp; diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c index 95a33e187f8e..31d0acb54fe8 100644 --- a/drivers/media/dvb-frontends/tda10086.c +++ b/drivers/media/dvb-frontends/tda10086.c @@ -459,9 +459,9 @@ static int tda10086_set_frontend(struct dvb_frontend *fe) return 0; } -static int tda10086_get_frontend(struct dvb_frontend *fe) +static int tda10086_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *fe_params) { - struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache; struct tda10086_state* state = fe->demodulator_priv; u8 val; int tmp; diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c index 796543fa2c8d..9072d6463094 100644 --- a/drivers/media/dvb-frontends/tda8083.c +++ b/drivers/media/dvb-frontends/tda8083.c @@ -342,9 +342,9 @@ static int tda8083_set_frontend(struct dvb_frontend *fe) return 0; } -static int tda8083_get_frontend(struct dvb_frontend *fe) +static int tda8083_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct tda8083_state* state = fe->demodulator_priv; /* FIXME: get symbolrate & frequency offset...*/ diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c index 7979e5d6498b..14b410ffe612 100644 --- a/drivers/media/dvb-frontends/ts2020.c +++ b/drivers/media/dvb-frontends/ts2020.c @@ -712,6 +712,10 @@ static int ts2020_remove(struct i2c_client *client) dev_dbg(&client->dev, "\n"); + /* stop statistics polling */ + if (!dev->dont_poll) + cancel_delayed_work_sync(&dev->stat_work); + regmap_exit(dev->regmap); kfree(dev); return 0; diff --git a/drivers/media/dvb-frontends/ves1820.c b/drivers/media/dvb-frontends/ves1820.c index aacfdda3e005..b09fe88c40f8 100644 --- a/drivers/media/dvb-frontends/ves1820.c +++ 
b/drivers/media/dvb-frontends/ves1820.c @@ -312,9 +312,9 @@ static int ves1820_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) return 0; } -static int ves1820_get_frontend(struct dvb_frontend *fe) +static int ves1820_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct ves1820_state* state = fe->demodulator_priv; int sync; s8 afc = 0; diff --git a/drivers/media/dvb-frontends/ves1x93.c b/drivers/media/dvb-frontends/ves1x93.c index 526952396422..ed113e216e14 100644 --- a/drivers/media/dvb-frontends/ves1x93.c +++ b/drivers/media/dvb-frontends/ves1x93.c @@ -406,9 +406,9 @@ static int ves1x93_set_frontend(struct dvb_frontend *fe) return 0; } -static int ves1x93_get_frontend(struct dvb_frontend *fe) +static int ves1x93_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct ves1x93_state* state = fe->demodulator_priv; int afc; diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c index ef9764a02d4c..1832c2f7695c 100644 --- a/drivers/media/dvb-frontends/zl10353.c +++ b/drivers/media/dvb-frontends/zl10353.c @@ -371,9 +371,9 @@ static int zl10353_set_parameters(struct dvb_frontend *fe) return 0; } -static int zl10353_get_parameters(struct dvb_frontend *fe) +static int zl10353_get_parameters(struct dvb_frontend *fe, + struct dtv_frontend_properties *c) { - struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct zl10353_state *state = fe->demodulator_priv; int s6, s9; u16 tps; diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c index 7e9cbf757e95..fb7ed730d932 100644 --- a/drivers/media/i2c/adp1653.c +++ b/drivers/media/i2c/adp1653.c @@ -497,7 +497,7 @@ static int adp1653_probe(struct i2c_client *client, if (!client->dev.platform_data) { dev_err(&client->dev, "Neither DT not platform data provided\n"); - return EINVAL; + return -EINVAL; } flash->platform_data = client->dev.platform_data; } diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c index 471fd23b5c5c..bd822f032b08 100644 --- a/drivers/media/i2c/adv7511.c +++ b/drivers/media/i2c/adv7511.c @@ -103,12 +103,14 @@ struct adv7511_state { u32 ycbcr_enc; u32 quantization; u32 xfer_func; + u32 content_type; /* controls */ struct v4l2_ctrl *hdmi_mode_ctrl; struct v4l2_ctrl *hotplug_ctrl; struct v4l2_ctrl *rx_sense_ctrl; struct v4l2_ctrl *have_edid0_ctrl; struct v4l2_ctrl *rgb_quantization_range_ctrl; + struct v4l2_ctrl *content_type_ctrl; struct i2c_client *i2c_edid; struct i2c_client *i2c_pktmem; struct adv7511_state_edid edid; @@ -400,6 +402,16 @@ static int adv7511_s_ctrl(struct v4l2_ctrl *ctrl) } if (state->rgb_quantization_range_ctrl == ctrl) return adv7511_set_rgb_quantization_mode(sd, ctrl); + if (state->content_type_ctrl == ctrl) { + u8 itc, cn; + + state->content_type = ctrl->val; + itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC; + cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS; + adv7511_wr_and_or(sd, 0x57, 0x7f, itc << 7); + adv7511_wr_and_or(sd, 0x59, 0xcf, cn << 4); + return 0; + } return -EINVAL; } @@ -1002,6 +1014,8 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd, u8 y = HDMI_COLORSPACE_RGB; u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT; u8 yq = HDMI_YCC_QUANTIZATION_RANGE_LIMITED; + u8 itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC; + u8 cn = itc ? 
state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS; if (format->pad != 0) return -EINVAL; @@ -1115,8 +1129,8 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd, adv7511_wr_and_or(sd, 0x4a, 0xbf, 0); adv7511_wr_and_or(sd, 0x55, 0x9f, y << 5); adv7511_wr_and_or(sd, 0x56, 0x3f, c << 6); - adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2)); - adv7511_wr_and_or(sd, 0x59, 0x3f, yq << 6); + adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2) | (itc << 7)); + adv7511_wr_and_or(sd, 0x59, 0x0f, (yq << 6) | (cn << 4)); adv7511_wr_and_or(sd, 0x4a, 0xff, 1); return 0; @@ -1161,12 +1175,23 @@ static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, in } } +static void adv7511_notify_no_edid(struct v4l2_subdev *sd) +{ + struct adv7511_state *state = get_adv7511_state(sd); + struct adv7511_edid_detect ed; + + /* We failed to read the EDID, so send an event for this. */ + ed.present = false; + ed.segment = adv7511_rd(sd, 0xc4); + v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed); + v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0); +} + static void adv7511_edid_handler(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler); struct v4l2_subdev *sd = &state->sd; - struct adv7511_edid_detect ed; v4l2_dbg(1, debug, sd, "%s:\n", __func__); @@ -1191,9 +1216,7 @@ static void adv7511_edid_handler(struct work_struct *work) } /* We failed to read the EDID, so send an event for this. */ - ed.present = false; - ed.segment = adv7511_rd(sd, 0xc4); - v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed); + adv7511_notify_no_edid(sd); v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__); } @@ -1264,7 +1287,6 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd) /* update read only ctrls */ v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0); v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0); - v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0); if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) { v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__); @@ -1294,6 +1316,7 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd) } adv7511_s_power(sd, false); memset(&state->edid, 0, sizeof(struct adv7511_state_edid)); + adv7511_notify_no_edid(sd); } } @@ -1370,6 +1393,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd) } /* one more segment read ok */ state->edid.segments = segment + 1; + v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1); if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) { /* Request next EDID segment */ v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments); @@ -1389,7 +1413,6 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd) ed.present = true; ed.segment = 0; state->edid_detect_counter++; - v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 
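The ITC/CN register writes above implement the new V4L2_CID_DV_TX_IT_CONTENT_TYPE control documented earlier in this series; from user space it behaves like any other menu control on the transmitter's subdev node. A sketch under a few assumptions: the /dev/v4l-subdev0 path and the chosen content type are illustrative, and the uapi headers must already carry the new IDs added by this series.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* assumed node */
	struct v4l2_control ctrl;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_DV_TX_IT_CONTENT_TYPE;
	ctrl.value = V4L2_DV_IT_CONTENT_TYPE_GAME;	/* e.g. game content */

	/* adv7511 translates this into the ITC and CN bits of the
	 * transmitted AVI InfoFrame (registers 0x57 and 0x59 above). */
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");

	close(fd);
	return 0;
}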
0x1 : 0x0); v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed); return ed.present; } @@ -1470,6 +1493,10 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id * v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops, V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL, 0, V4L2_DV_RGB_RANGE_AUTO); + state->content_type_ctrl = + v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops, + V4L2_CID_DV_TX_IT_CONTENT_TYPE, V4L2_DV_IT_CONTENT_TYPE_NO_ITC, + 0, V4L2_DV_IT_CONTENT_TYPE_NO_ITC); sd->ctrl_handler = hdl; if (hdl->error) { err = hdl->error; diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index f8dd7505b529..d35e0ba8b269 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -207,71 +207,22 @@ static bool adv76xx_has_afe(struct adv76xx_state *state) return state->info->has_afe; } -/* Supported CEA and DMT timings */ -static const struct v4l2_dv_timings adv76xx_timings[] = { - V4L2_DV_BT_CEA_720X480P59_94, - V4L2_DV_BT_CEA_720X576P50, - V4L2_DV_BT_CEA_1280X720P24, - V4L2_DV_BT_CEA_1280X720P25, - V4L2_DV_BT_CEA_1280X720P50, - V4L2_DV_BT_CEA_1280X720P60, - V4L2_DV_BT_CEA_1920X1080P24, - V4L2_DV_BT_CEA_1920X1080P25, - V4L2_DV_BT_CEA_1920X1080P30, - V4L2_DV_BT_CEA_1920X1080P50, - V4L2_DV_BT_CEA_1920X1080P60, - - /* sorted by DMT ID */ - V4L2_DV_BT_DMT_640X350P85, - V4L2_DV_BT_DMT_640X400P85, - V4L2_DV_BT_DMT_720X400P85, - V4L2_DV_BT_DMT_640X480P60, - V4L2_DV_BT_DMT_640X480P72, - V4L2_DV_BT_DMT_640X480P75, - V4L2_DV_BT_DMT_640X480P85, - V4L2_DV_BT_DMT_800X600P56, - V4L2_DV_BT_DMT_800X600P60, - V4L2_DV_BT_DMT_800X600P72, - V4L2_DV_BT_DMT_800X600P75, - V4L2_DV_BT_DMT_800X600P85, - V4L2_DV_BT_DMT_848X480P60, - V4L2_DV_BT_DMT_1024X768P60, - V4L2_DV_BT_DMT_1024X768P70, - V4L2_DV_BT_DMT_1024X768P75, - V4L2_DV_BT_DMT_1024X768P85, - V4L2_DV_BT_DMT_1152X864P75, - V4L2_DV_BT_DMT_1280X768P60_RB, - V4L2_DV_BT_DMT_1280X768P60, - V4L2_DV_BT_DMT_1280X768P75, - V4L2_DV_BT_DMT_1280X768P85, - V4L2_DV_BT_DMT_1280X800P60_RB, - V4L2_DV_BT_DMT_1280X800P60, - V4L2_DV_BT_DMT_1280X800P75, - V4L2_DV_BT_DMT_1280X800P85, - V4L2_DV_BT_DMT_1280X960P60, - V4L2_DV_BT_DMT_1280X960P85, - V4L2_DV_BT_DMT_1280X1024P60, - V4L2_DV_BT_DMT_1280X1024P75, - V4L2_DV_BT_DMT_1280X1024P85, - V4L2_DV_BT_DMT_1360X768P60, - V4L2_DV_BT_DMT_1400X1050P60_RB, - V4L2_DV_BT_DMT_1400X1050P60, - V4L2_DV_BT_DMT_1400X1050P75, - V4L2_DV_BT_DMT_1400X1050P85, - V4L2_DV_BT_DMT_1440X900P60_RB, - V4L2_DV_BT_DMT_1440X900P60, - V4L2_DV_BT_DMT_1600X1200P60, - V4L2_DV_BT_DMT_1680X1050P60_RB, - V4L2_DV_BT_DMT_1680X1050P60, - V4L2_DV_BT_DMT_1792X1344P60, - V4L2_DV_BT_DMT_1856X1392P60, - V4L2_DV_BT_DMT_1920X1200P60_RB, - V4L2_DV_BT_DMT_1366X768P60_RB, - V4L2_DV_BT_DMT_1366X768P60, - V4L2_DV_BT_DMT_1920X1080P60, - { }, +/* Unsupported timings. This device cannot support 720p30. 
*/ +static const struct v4l2_dv_timings adv76xx_timings_exceptions[] = { + V4L2_DV_BT_CEA_1280X720P30, + { } }; +static bool adv76xx_check_dv_timings(const struct v4l2_dv_timings *t, void *hdl) +{ + int i; + + for (i = 0; adv76xx_timings_exceptions[i].bt.width; i++) + if (v4l2_match_dv_timings(t, adv76xx_timings_exceptions + i, 0, false)) + return false; + return true; +} + struct adv76xx_video_standards { struct v4l2_dv_timings timings; u8 vid_std; @@ -806,6 +757,36 @@ static inline bool is_digital_input(struct v4l2_subdev *sd) state->selected_input == ADV7604_PAD_HDMI_PORT_D; } +static const struct v4l2_dv_timings_cap adv7604_timings_cap_analog = { + .type = V4L2_DV_BT_656_1120, + /* keep this initialization for compatibility with GCC < 4.4.6 */ + .reserved = { 0 }, + V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000, + V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | + V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, + V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING | + V4L2_DV_BT_CAP_CUSTOM) +}; + +static const struct v4l2_dv_timings_cap adv76xx_timings_cap_digital = { + .type = V4L2_DV_BT_656_1120, + /* keep this initialization for compatibility with GCC < 4.4.6 */ + .reserved = { 0 }, + V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000, + V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | + V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, + V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING | + V4L2_DV_BT_CAP_CUSTOM) +}; + +static inline const struct v4l2_dv_timings_cap * +adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd) +{ + return is_digital_input(sd) ? &adv76xx_timings_cap_digital : + &adv7604_timings_cap_analog; +} + + /* ----------------------------------------------------------------------- */ #ifdef CONFIG_VIDEO_ADV_DEBUG @@ -1216,6 +1197,20 @@ static int adv76xx_s_ctrl(struct v4l2_ctrl *ctrl) return -EINVAL; } +static int adv76xx_g_volatile_ctrl(struct v4l2_ctrl *ctrl) +{ + struct v4l2_subdev *sd = + &container_of(ctrl->handler, struct adv76xx_state, hdl)->sd; + + if (ctrl->id == V4L2_CID_DV_RX_IT_CONTENT_TYPE) { + ctrl->val = V4L2_DV_IT_CONTENT_TYPE_NO_ITC; + if ((io_read(sd, 0x60) & 1) && (infoframe_read(sd, 0x03) & 0x80)) + ctrl->val = (infoframe_read(sd, 0x05) >> 4) & 3; + return 0; + } + return -EINVAL; +} + /* ----------------------------------------------------------------------- */ static inline bool no_power(struct v4l2_subdev *sd) @@ -1330,17 +1325,23 @@ static int stdi2dv_timings(struct v4l2_subdev *sd, u32 pix_clk; int i; - for (i = 0; adv76xx_timings[i].bt.height; i++) { - if (vtotal(&adv76xx_timings[i].bt) != stdi->lcf + 1) + for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) { + const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt; + + if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i], + adv76xx_get_dv_timings_cap(sd), + adv76xx_check_dv_timings, NULL)) + continue; + if (vtotal(bt) != stdi->lcf + 1) continue; - if (adv76xx_timings[i].bt.vsync != stdi->lcvs) + if (bt->vsync != stdi->lcvs) continue; - pix_clk = hfreq * htotal(&adv76xx_timings[i].bt); + pix_clk = hfreq * htotal(bt); - if ((pix_clk < adv76xx_timings[i].bt.pixelclock + 1000000) && - (pix_clk > adv76xx_timings[i].bt.pixelclock - 1000000)) { - *timings = adv76xx_timings[i]; + if ((pix_clk < bt->pixelclock + 1000000) && + (pix_clk > bt->pixelclock - 1000000)) { + *timings = v4l2_dv_timings_presets[i]; return 0; } } @@ -1425,15 +1426,11 @@ static int adv76xx_enum_dv_timings(struct v4l2_subdev *sd, { struct adv76xx_state *state = to_state(sd); - if (timings->index >= 
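The receiver-side counterpart added here (adv76xx_g_volatile_ctrl, and the same handler in adv7842 further down) exposes the ITC/CN bits of the incoming AVI InfoFrame as the volatile V4L2_CID_DV_RX_IT_CONTENT_TYPE control. Reading it from user space is a plain VIDIOC_G_CTRL; a sketch with the device node path assumed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/v4l-subdev0", O_RDONLY);	/* assumed node */
	struct v4l2_control ctrl;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_DV_RX_IT_CONTENT_TYPE;

	/* The control is volatile: the driver re-reads the InfoFrame
	 * ITC/CN bits on every read. */
	if (ioctl(fd, VIDIOC_G_CTRL, &ctrl) == 0)
		printf("IT content type: %s\n",
		       ctrl.value == V4L2_DV_IT_CONTENT_TYPE_NO_ITC ?
		       "none signalled" : "ITC set");
	else
		perror("VIDIOC_G_CTRL");

	close(fd);
	return 0;
}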
ARRAY_SIZE(adv76xx_timings) - 1) - return -EINVAL; - if (timings->pad >= state->source_pad) return -EINVAL; - memset(timings->reserved, 0, sizeof(timings->reserved)); - timings->timings = adv76xx_timings[timings->index]; - return 0; + return v4l2_enum_dv_timings_cap(timings, + adv76xx_get_dv_timings_cap(sd), adv76xx_check_dv_timings, NULL); } static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd, @@ -1444,29 +1441,7 @@ static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd, if (cap->pad >= state->source_pad) return -EINVAL; - cap->type = V4L2_DV_BT_656_1120; - cap->bt.max_width = 1920; - cap->bt.max_height = 1200; - cap->bt.min_pixelclock = 25000000; - - switch (cap->pad) { - case ADV76XX_PAD_HDMI_PORT_A: - case ADV7604_PAD_HDMI_PORT_B: - case ADV7604_PAD_HDMI_PORT_C: - case ADV7604_PAD_HDMI_PORT_D: - cap->bt.max_pixelclock = 225000000; - break; - case ADV7604_PAD_VGA_RGB: - case ADV7604_PAD_VGA_COMP: - default: - cap->bt.max_pixelclock = 170000000; - break; - } - - cap->bt.standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | - V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT; - cap->bt.capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | - V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM; + *cap = *adv76xx_get_dv_timings_cap(sd); return 0; } @@ -1475,15 +1450,9 @@ static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd, static void adv76xx_fill_optional_dv_timings_fields(struct v4l2_subdev *sd, struct v4l2_dv_timings *timings) { - int i; - - for (i = 0; adv76xx_timings[i].bt.width; i++) { - if (v4l2_match_dv_timings(timings, &adv76xx_timings[i], - is_digital_input(sd) ? 250000 : 1000000, false)) { - *timings = adv76xx_timings[i]; - break; - } - } + v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd), + is_digital_input(sd) ? 250000 : 1000000, + adv76xx_check_dv_timings, NULL); } static unsigned int adv7604_read_hdmi_pixelclock(struct v4l2_subdev *sd) @@ -1651,12 +1620,9 @@ static int adv76xx_s_dv_timings(struct v4l2_subdev *sd, bt = &timings->bt; - if ((is_analog_input(sd) && bt->pixelclock > 170000000) || - (is_digital_input(sd) && bt->pixelclock > 225000000)) { - v4l2_dbg(1, debug, sd, "%s: pixelclock out of range %d\n", - __func__, (u32)bt->pixelclock); + if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd), + adv76xx_check_dv_timings, NULL)) return -ERANGE; - } adv76xx_fill_optional_dv_timings_fields(sd, timings); @@ -1884,6 +1850,26 @@ static int adv76xx_get_format(struct v4l2_subdev *sd, return 0; } +static int adv76xx_get_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct adv76xx_state *state = to_state(sd); + + if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) + return -EINVAL; + /* Only CROP, CROP_DEFAULT and CROP_BOUNDS are supported */ + if (sel->target > V4L2_SEL_TGT_CROP_BOUNDS) + return -EINVAL; + + sel->r.left = 0; + sel->r.top = 0; + sel->r.width = state->timings.bt.width; + sel->r.height = state->timings.bt.height; + + return 0; +} + static int adv76xx_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *format) @@ -2381,6 +2367,7 @@ static int adv76xx_subscribe_event(struct v4l2_subdev *sd, static const struct v4l2_ctrl_ops adv76xx_ctrl_ops = { .s_ctrl = adv76xx_s_ctrl, + .g_volatile_ctrl = adv76xx_g_volatile_ctrl, }; static const struct v4l2_subdev_core_ops adv76xx_core_ops = { @@ -2404,6 +2391,7 @@ static const struct v4l2_subdev_video_ops adv76xx_video_ops = { static const struct v4l2_subdev_pad_ops adv76xx_pad_ops = { .enum_mbus_code 
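With the private timings table gone, DV timings enumeration on the adv76xx is now backed by v4l2_enum_dv_timings_cap(), i.e. the core v4l2_dv_timings_presets[] list filtered by the capability tables and the 720p30 exception above; applications simply see a longer list. An enumeration sketch (subdev node and pad number are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/v4l2-subdev.h>

int main(void)
{
	int fd = open("/dev/v4l-subdev0", O_RDONLY);	/* assumed node */
	struct v4l2_enum_dv_timings e;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&e, 0, sizeof(e));
	e.pad = 0;	/* assumed: the pad to enumerate for */

	while (ioctl(fd, VIDIOC_SUBDEV_ENUM_DV_TIMINGS, &e) == 0) {
		printf("%u: %ux%u%s\n", e.index,
		       e.timings.bt.width, e.timings.bt.height,
		       e.timings.bt.interlaced ? "i" : "p");
		e.index++;
	}

	close(fd);
	return 0;
}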
= adv76xx_enum_mbus_code, + .get_selection = adv76xx_get_selection, .get_fmt = adv76xx_get_format, .set_fmt = adv76xx_set_format, .get_edid = adv76xx_get_edid, @@ -2799,6 +2787,7 @@ static int adv76xx_parse_dt(struct adv76xx_state *state) struct device_node *endpoint; struct device_node *np; unsigned int flags; + int ret; u32 v; np = state->i2c_clients[ADV76XX_PAGE_IO]->dev.of_node; @@ -2808,7 +2797,11 @@ static int adv76xx_parse_dt(struct adv76xx_state *state) if (!endpoint) return -EINVAL; - v4l2_of_parse_endpoint(endpoint, &bus_cfg); + ret = v4l2_of_parse_endpoint(endpoint, &bus_cfg); + if (ret) { + of_node_put(endpoint); + return ret; + } if (!of_property_read_u32(endpoint, "default-input", &v)) state->pdata.default_input = v; @@ -3010,6 +3003,7 @@ static int adv76xx_probe(struct i2c_client *client, V4L2_DV_BT_CEA_640X480P59_94; struct adv76xx_state *state; struct v4l2_ctrl_handler *hdl; + struct v4l2_ctrl *ctrl; struct v4l2_subdev *sd; unsigned int i; unsigned int val, val2; @@ -3141,6 +3135,11 @@ static int adv76xx_probe(struct i2c_client *client, V4L2_CID_SATURATION, 0, 255, 1, 128); v4l2_ctrl_new_std(hdl, &adv76xx_ctrl_ops, V4L2_CID_HUE, 0, 128, 1, 0); + ctrl = v4l2_ctrl_new_std_menu(hdl, &adv76xx_ctrl_ops, + V4L2_CID_DV_RX_IT_CONTENT_TYPE, V4L2_DV_IT_CONTENT_TYPE_NO_ITC, + 0, V4L2_DV_IT_CONTENT_TYPE_NO_ITC); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; /* private controls */ state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL, diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index 5fbb788e7b59..7ccb85d45224 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -1359,6 +1359,19 @@ static int adv7842_s_ctrl(struct v4l2_ctrl *ctrl) return -EINVAL; } +static int adv7842_g_volatile_ctrl(struct v4l2_ctrl *ctrl) +{ + struct v4l2_subdev *sd = to_sd(ctrl); + + if (ctrl->id == V4L2_CID_DV_RX_IT_CONTENT_TYPE) { + ctrl->val = V4L2_DV_IT_CONTENT_TYPE_NO_ITC; + if ((io_read(sd, 0x60) & 1) && (infoframe_read(sd, 0x03) & 0x80)) + ctrl->val = (infoframe_read(sd, 0x05) >> 4) & 3; + return 0; + } + return -EINVAL; +} + static inline bool no_power(struct v4l2_subdev *sd) { return io_read(sd, 0x0c) & 0x24; @@ -3022,6 +3035,7 @@ static int adv7842_subscribe_event(struct v4l2_subdev *sd, static const struct v4l2_ctrl_ops adv7842_ctrl_ops = { .s_ctrl = adv7842_s_ctrl, + .g_volatile_ctrl = adv7842_g_volatile_ctrl, }; static const struct v4l2_subdev_core_ops adv7842_core_ops = { @@ -3196,6 +3210,7 @@ static int adv7842_probe(struct i2c_client *client, V4L2_DV_BT_CEA_640X480P59_94; struct adv7842_platform_data *pdata = client->dev.platform_data; struct v4l2_ctrl_handler *hdl; + struct v4l2_ctrl *ctrl; struct v4l2_subdev *sd; u16 rev; int err; @@ -3261,6 +3276,11 @@ static int adv7842_probe(struct i2c_client *client, V4L2_CID_SATURATION, 0, 255, 1, 128); v4l2_ctrl_new_std(hdl, &adv7842_ctrl_ops, V4L2_CID_HUE, 0, 128, 1, 0); + ctrl = v4l2_ctrl_new_std_menu(hdl, &adv7842_ctrl_ops, + V4L2_CID_DV_RX_IT_CONTENT_TYPE, V4L2_DV_IT_CONTENT_TYPE_NO_ITC, + 0, V4L2_DV_IT_CONTENT_TYPE_NO_ITC); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; /* custom controls */ state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL, diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c index a84561d0d4a8..e016626ebf89 100644 --- a/drivers/media/i2c/msp3400-driver.c +++ b/drivers/media/i2c/msp3400-driver.c @@ -688,6 +688,9 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id) int msp_revision; int msp_product, 
msp_prod_hi, msp_prod_lo; int msp_rom; +#if defined(CONFIG_MEDIA_CONTROLLER) + int ret; +#endif if (!id) strlcpy(client->name, "msp3400", sizeof(client->name)); @@ -704,6 +707,17 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id) sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &msp_ops); +#if defined(CONFIG_MEDIA_CONTROLLER) + state->pads[IF_AUD_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK; + state->pads[IF_AUD_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_IF_AUD_DECODER; + + ret = media_entity_pads_init(&sd->entity, 2, state->pads); + if (ret < 0) + return ret; +#endif + state->v4l2_std = V4L2_STD_NTSC; state->detected_std = V4L2_STD_ALL; state->audmode = V4L2_TUNER_MODE_STEREO; diff --git a/drivers/media/i2c/msp3400-driver.h b/drivers/media/i2c/msp3400-driver.h index 6cae21366ed5..a8702aca187a 100644 --- a/drivers/media/i2c/msp3400-driver.h +++ b/drivers/media/i2c/msp3400-driver.h @@ -7,6 +7,7 @@ #include <media/drv-intf/msp3400.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> +#include <media/v4l2-mc.h> /* ---------------------------------------------------------------------- */ @@ -102,6 +103,10 @@ struct msp_state { wait_queue_head_t wq; unsigned int restart:1; unsigned int watch_stereo:1; + +#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) + struct media_pad pads[IF_AUD_DEC_PAD_NUM_PADS]; +#endif }; static inline struct msp_state *to_state(struct v4l2_subdev *sd) diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c index b9fea11d6b0b..9ed1b26b6549 100644 --- a/drivers/media/i2c/mt9v011.c +++ b/drivers/media/i2c/mt9v011.c @@ -50,6 +50,9 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)"); struct mt9v011 { struct v4l2_subdev sd; +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_pad pad; +#endif struct v4l2_ctrl_handler ctrls; unsigned width, height; unsigned xtal; @@ -493,6 +496,9 @@ static int mt9v011_probe(struct i2c_client *c, u16 version; struct mt9v011 *core; struct v4l2_subdev *sd; +#ifdef CONFIG_MEDIA_CONTROLLER + int ret; +#endif /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(c->adapter, @@ -506,6 +512,15 @@ static int mt9v011_probe(struct i2c_client *c, sd = &core->sd; v4l2_i2c_subdev_init(sd, c, &mt9v011_ops); +#ifdef CONFIG_MEDIA_CONTROLLER + core->pad.flags = MEDIA_PAD_FL_SOURCE; + sd->entity.function = MEDIA_ENT_F_CAM_SENSOR; + + ret = media_entity_pads_init(&sd->entity, 1, &core->pad); + if (ret < 0) + return ret; +#endif + /* Check if the sensor is really a MT9V011 */ version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION); if ((version != MT9V011_VERSION) && diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c index 2e1d116a64e7..501b37039449 100644 --- a/drivers/media/i2c/mt9v032.c +++ b/drivers/media/i2c/mt9v032.c @@ -14,6 +14,7 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/log2.h> #include <linux/mutex.h> @@ -251,6 +252,8 @@ struct mt9v032 { struct regmap *regmap; struct clk *clk; + struct gpio_desc *reset_gpio; + struct gpio_desc *standby_gpio; struct mt9v032_platform_data *pdata; const struct mt9v032_model_info *model; @@ -312,16 +315,31 @@ static int mt9v032_power_on(struct mt9v032 *mt9v032) struct regmap *map = mt9v032->regmap; int ret; + if (mt9v032->reset_gpio) + gpiod_set_value_cansleep(mt9v032->reset_gpio, 1); + ret = clk_set_rate(mt9v032->clk, mt9v032->sysclk); if (ret < 0) return ret; + /* System clock has to be enabled before releasing the reset 
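msp3400 and mt9v011 above (and saa7115 and tvp5150 later in this series) gain the same media-controller boilerplate: pads stored in the state structure, direction flags, an entity function, and a media_entity_pads_init() call in probe. The pattern, condensed for a hypothetical two-pad decoder (names are illustrative, not from this series):

#include <media/media-entity.h>
#include <media/v4l2-subdev.h>

struct foo_state {
	struct v4l2_subdev sd;
#if defined(CONFIG_MEDIA_CONTROLLER)
	struct media_pad pads[2];	/* 0: sink, 1: source */
#endif
};

static int foo_register_entity(struct foo_state *state)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
	state->pads[0].flags = MEDIA_PAD_FL_SINK;
	state->pads[1].flags = MEDIA_PAD_FL_SOURCE;

	/* Pick the function that matches the hardware block. */
	state->sd.entity.function = MEDIA_ENT_F_IF_AUD_DECODER;

	/* Attach the pads to the entity embedded in the subdev; the
	 * entity itself is registered later, when the subdev is added
	 * to a v4l2_device that has a media_device behind it. */
	return media_entity_pads_init(&state->sd.entity, 2, state->pads);
#else
	return 0;
#endif
}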
*/ ret = clk_prepare_enable(mt9v032->clk); if (ret) return ret; udelay(1); + if (mt9v032->reset_gpio) { + gpiod_set_value_cansleep(mt9v032->reset_gpio, 0); + + /* After releasing reset we need to wait 10 clock cycles + * before accessing the sensor over I2C. As the minimum SYSCLK + * frequency is 13MHz, waiting 1µs will be enough in the worst + * case. + */ + udelay(1); + } + /* Reset the chip and stop data read out */ ret = regmap_write(map, MT9V032_RESET, 1); if (ret < 0) @@ -954,6 +972,16 @@ static int mt9v032_probe(struct i2c_client *client, if (IS_ERR(mt9v032->clk)) return PTR_ERR(mt9v032->clk); + mt9v032->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(mt9v032->reset_gpio)) + return PTR_ERR(mt9v032->reset_gpio); + + mt9v032->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby", + GPIOD_OUT_LOW); + if (IS_ERR(mt9v032->standby_gpio)) + return PTR_ERR(mt9v032->standby_gpio); + mutex_init(&mt9v032->power_lock); mt9v032->pdata = pdata; mt9v032->model = (const void *)did->driver_data; diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c index 02b9a3440557..1f999e9c0118 100644 --- a/drivers/media/i2c/ov2659.c +++ b/drivers/media/i2c/ov2659.c @@ -1321,10 +1321,6 @@ static int ov2659_detect(struct v4l2_subdev *sd) } usleep_range(1000, 2000); - ret = ov2659_init(sd, 0); - if (ret < 0) - return ret; - /* Check sensor revision */ ret = ov2659_read(client, REG_SC_CHIP_ID_H, &pid); if (!ret) @@ -1338,8 +1334,10 @@ static int ov2659_detect(struct v4l2_subdev *sd) dev_err(&client->dev, "Sensor detection failed (%04X, %d)\n", id, ret); - else + else { dev_info(&client->dev, "Found OV%04X sensor\n", id); + ret = ov2659_init(sd, 0); + } } return ret; diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c index a0b3c9bde53d..be5a7fd4f076 100644 --- a/drivers/media/i2c/ov9650.c +++ b/drivers/media/i2c/ov9650.c @@ -1046,8 +1046,8 @@ static int ov965x_initialize_controls(struct ov965x *ov965x) ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE; v4l2_ctrl_auto_cluster(3, &ctrls->auto_wb, 0, false); - v4l2_ctrl_auto_cluster(3, &ctrls->auto_gain, 0, true); - v4l2_ctrl_auto_cluster(3, &ctrls->auto_exp, 1, true); + v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true); + v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true); v4l2_ctrl_cluster(2, &ctrls->hflip); ov965x->sd.ctrl_handler = hdl; diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index 57b3d27993a4..08af58fb8e7d 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -1639,8 +1639,10 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state) return 0; } - v4l2_of_parse_endpoint(node_ep, &ep); + ret = v4l2_of_parse_endpoint(node_ep, &ep); of_node_put(node_ep); + if (ret) + return ret; if (ep.bus_type != V4L2_MBUS_CSI2) { dev_err(dev, "unsupported bus type\n"); diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c index 7d65b36434b1..72ef9f936e6c 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c @@ -37,7 +37,6 @@ enum spi_direction { SPI_DIR_RX, SPI_DIR_TX }; -MODULE_DEVICE_TABLE(of, s5c73m3_spi_ids); static int spi_xmit(struct spi_device *spi_dev, void *addr, const int len, enum spi_direction dir) diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c index fc3a5a8e6c9c..db82ed05792e 100644 --- a/drivers/media/i2c/s5k5baf.c +++ b/drivers/media/i2c/s5k5baf.c @@ -1868,8 
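The mt9v032 hunks above fetch the reset and standby lines with devm_gpiod_get_optional(), which returns NULL when the property is absent and an ERR_PTR only on real errors, so the power-up path only needs a NULL check. The same idiom, reduced to a sketch (driver name and settle time are illustrative):

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

struct foo_sensor {
	struct gpio_desc *reset_gpio;
};

static int foo_get_gpios(struct foo_sensor *foo, struct device *dev)
{
	/* NULL if no "reset-gpios" property, ERR_PTR on a real failure. */
	foo->reset_gpio = devm_gpiod_get_optional(dev, "reset",
						  GPIOD_OUT_HIGH);
	if (IS_ERR(foo->reset_gpio))
		return PTR_ERR(foo->reset_gpio);
	return 0;
}

static void foo_power_on(struct foo_sensor *foo)
{
	if (foo->reset_gpio) {
		/* Release reset once the clocks are running. */
		gpiod_set_value_cansleep(foo->reset_gpio, 0);
		udelay(1);	/* sensor-specific settle time */
	}
}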
+1868,11 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev) return -EINVAL; } - v4l2_of_parse_endpoint(node_ep, &ep); + ret = v4l2_of_parse_endpoint(node_ep, &ep); of_node_put(node_ep); + if (ret) + return ret; + state->bus_type = ep.bus_type; switch (state->bus_type) { diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c index 24d2b76dbe97..d2a1ce2bc7f5 100644 --- a/drivers/media/i2c/saa7115.c +++ b/drivers/media/i2c/saa7115.c @@ -46,6 +46,7 @@ #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> +#include <media/v4l2-mc.h> #include <media/i2c/saa7115.h> #include <asm/div64.h> @@ -74,6 +75,9 @@ enum saa711x_model { struct saa711x_state { struct v4l2_subdev sd; +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_pad pads[DEMOD_NUM_PADS]; +#endif struct v4l2_ctrl_handler hdl; struct { @@ -1809,6 +1813,9 @@ static int saa711x_probe(struct i2c_client *client, struct saa7115_platform_data *pdata; int ident; char name[CHIP_VER_SIZE + 1]; +#if defined(CONFIG_MEDIA_CONTROLLER) + int ret; +#endif /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) @@ -1832,6 +1839,18 @@ static int saa711x_probe(struct i2c_client *client, sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &saa711x_ops); +#if defined(CONFIG_MEDIA_CONTROLLER) + state->pads[DEMOD_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK; + state->pads[DEMOD_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE; + state->pads[DEMOD_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_ATV_DECODER; + + ret = media_entity_pads_init(&sd->entity, DEMOD_NUM_PADS, state->pads); + if (ret < 0) + return ret; +#endif + v4l_info(client, "%s found @ 0x%x (%s)\n", name, client->addr << 1, client->adapter->name); hdl = &state->hdl; diff --git a/drivers/media/i2c/soc_camera/mt9m001.c b/drivers/media/i2c/soc_camera/mt9m001.c index 2e14e52ba2e0..69becc358659 100644 --- a/drivers/media/i2c/soc_camera/mt9m001.c +++ b/drivers/media/i2c/soc_camera/mt9m001.c @@ -632,7 +632,7 @@ static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = { .s_mbus_config = mt9m001_s_mbus_config, }; -static struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = { +static const struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = { .g_skip_top_lines = mt9m001_g_skip_top_lines, }; diff --git a/drivers/media/i2c/soc_camera/mt9t031.c b/drivers/media/i2c/soc_camera/mt9t031.c index 3b6eeed2e2b9..5c8e3ffe3b27 100644 --- a/drivers/media/i2c/soc_camera/mt9t031.c +++ b/drivers/media/i2c/soc_camera/mt9t031.c @@ -728,7 +728,7 @@ static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = { .s_mbus_config = mt9t031_s_mbus_config, }; -static struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = { +static const struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = { .g_skip_top_lines = mt9t031_g_skip_top_lines, }; diff --git a/drivers/media/i2c/soc_camera/mt9v022.c b/drivers/media/i2c/soc_camera/mt9v022.c index c2ba1fb3694d..2721e583bfa0 100644 --- a/drivers/media/i2c/soc_camera/mt9v022.c +++ b/drivers/media/i2c/soc_camera/mt9v022.c @@ -860,7 +860,7 @@ static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = { .s_mbus_config = mt9v022_s_mbus_config, }; -static struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = { +static const struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = { .g_skip_top_lines = mt9v022_g_skip_top_lines, }; diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c 
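Several probe paths in this series (adv7604 and s5c73m3 above, s5k5baf here, tvp514x and tvp7002 further down) now check the return value of v4l2_of_parse_endpoint() instead of continuing with a possibly half-parsed endpoint. The resulting shape, sketched for a generic parallel-bus device (names are illustrative):

#include <linux/of.h>
#include <linux/of_graph.h>
#include <media/v4l2-of.h>

static int foo_parse_endpoint(struct device_node *np,
			      struct v4l2_of_endpoint *ep)
{
	struct device_node *node_ep;
	int ret;

	node_ep = of_graph_get_next_endpoint(np, NULL);
	if (!node_ep)
		return -EINVAL;

	/* Propagate parse errors instead of silently using a
	 * half-initialized endpoint description. */
	ret = v4l2_of_parse_endpoint(node_ep, ep);
	of_node_put(node_ep);
	if (ret)
		return ret;

	return ep->bus_type == V4L2_MBUS_PARALLEL ? 0 : -EINVAL;
}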
index 3397eb99c67b..da7469bc6e56 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -59,8 +59,7 @@ MODULE_LICENSE("GPL"); #define EDID_NUM_BLOCKS_MAX 8 #define EDID_BLOCK_SIZE 128 -/* Max transfer size done by I2C transfer functions */ -#define MAX_XFER_SIZE (EDID_NUM_BLOCKS_MAX * EDID_BLOCK_SIZE + 2) +#define I2C_MAX_XFER_SIZE (EDID_BLOCK_SIZE + 2) static const struct v4l2_dv_timings_cap tc358743_timings_cap = { .type = V4L2_DV_BT_656_1120, @@ -97,9 +96,6 @@ struct tc358743_state { /* edid */ u8 edid_blocks_written; - /* used by i2c_wr() */ - u8 wr_data[MAX_XFER_SIZE]; - struct v4l2_dv_timings timings; u32 mbus_fmt_code; @@ -149,13 +145,15 @@ static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n) { struct tc358743_state *state = to_state(sd); struct i2c_client *client = state->i2c_client; - u8 *data = state->wr_data; int err, i; struct i2c_msg msg; + u8 data[I2C_MAX_XFER_SIZE]; - if ((2 + n) > sizeof(state->wr_data)) + if ((2 + n) > I2C_MAX_XFER_SIZE) { + n = I2C_MAX_XFER_SIZE - 2; v4l2_warn(sd, "i2c wr reg=%04x: len=%d is too big!\n", reg, 2 + n); + } msg.addr = client->addr; msg.buf = data; @@ -859,15 +857,16 @@ static void tc358743_format_change(struct v4l2_subdev *sd) if (tc358743_get_detected_timings(sd, &timings)) { enable_stream(sd, false); - v4l2_dbg(1, debug, sd, "%s: Format changed. No signal\n", + v4l2_dbg(1, debug, sd, "%s: No signal\n", __func__); } else { if (!v4l2_match_dv_timings(&state->timings, &timings, 0, false)) enable_stream(sd, false); - v4l2_print_dv_timings(sd->name, - "tc358743_format_change: Format changed. New format: ", - &timings, false); + if (debug) + v4l2_print_dv_timings(sd->name, + "tc358743_format_change: New format: ", + &timings, false); } if (sd->devnode) @@ -1581,6 +1580,7 @@ static int tc358743_s_edid(struct v4l2_subdev *sd, { struct tc358743_state *state = to_state(sd); u16 edid_len = edid->blocks * EDID_BLOCK_SIZE; + int i; v4l2_dbg(2, debug, sd, "%s, pad %d, start block %d, blocks %d\n", __func__, edid->pad, edid->start_block, edid->blocks); @@ -1606,7 +1606,8 @@ static int tc358743_s_edid(struct v4l2_subdev *sd, return 0; } - i2c_wr(sd, EDID_RAM, edid->edid, edid_len); + for (i = 0; i < edid_len; i += EDID_BLOCK_SIZE) + i2c_wr(sd, EDID_RAM + i, edid->edid + i, EDID_BLOCK_SIZE); state->edid_blocks_written = edid->blocks; diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c index 7fa5f1e4fe37..7cdd94842938 100644 --- a/drivers/media/i2c/tvp514x.c +++ b/drivers/media/i2c/tvp514x.c @@ -1001,7 +1001,7 @@ static struct tvp514x_decoder tvp514x_dev = { static struct tvp514x_platform_data * tvp514x_get_pdata(struct i2c_client *client) { - struct tvp514x_platform_data *pdata; + struct tvp514x_platform_data *pdata = NULL; struct v4l2_of_endpoint bus_cfg; struct device_node *endpoint; unsigned int flags; @@ -1013,11 +1013,13 @@ tvp514x_get_pdata(struct i2c_client *client) if (!endpoint) return NULL; + if (v4l2_of_parse_endpoint(endpoint, &bus_cfg)) + goto done; + pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) goto done; - v4l2_of_parse_endpoint(endpoint, &bus_cfg); flags = bus_cfg.bus.parallel.flags; if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 6c3769d44b75..ef393f5daf2a 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -1,19 +1,22 @@ /* - * tvp5150 - Texas Instruments TVP5150A/AM1 video decoder driver + * tvp5150 - Texas Instruments TVP5150A/AM1 and TVP5151 video 
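On the tc358743 EDID change in this hunk: instead of one large transfer that needed an 8 KiB scratch buffer in the state structure, the EDID is now written block by block through a small on-stack buffer. The loop, reduced to a standalone helper with a stand-in for the driver's i2c_wr() primitive:

#include <stddef.h>
#include <stdint.h>

#define EDID_BLOCK_SIZE 128

/* Stand-in for the driver's register-write primitive. */
void chip_write(uint16_t reg, const uint8_t *buf, size_t len);

/* Write an EDID of 'blocks' * 128 bytes in block-sized chunks, so the
 * transfer buffer only ever has to hold one block plus the two-byte
 * register address. */
static void write_edid(uint16_t edid_ram, const uint8_t *edid,
		       unsigned int blocks)
{
	unsigned int i;

	for (i = 0; i < blocks * EDID_BLOCK_SIZE; i += EDID_BLOCK_SIZE)
		chip_write(edid_ram + i, edid + i, EDID_BLOCK_SIZE);
}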
decoder driver * * Copyright (c) 2005,2006 Mauro Carvalho Chehab (mchehab@infradead.org) * This code is placed under the terms of the GNU General Public License v2 */ +#include <dt-bindings/media/tvp5150.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/delay.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <media/v4l2-async.h> #include <media/v4l2-device.h> -#include <media/i2c/tvp5150.h> #include <media/v4l2-ctrls.h> +#include <media/v4l2-of.h> +#include <media/v4l2-mc.h> #include "tvp5150_reg.h" @@ -24,7 +27,7 @@ #define TVP5150_MAX_CROP_TOP 127 #define TVP5150_CROP_SHIFT 2 -MODULE_DESCRIPTION("Texas Instruments TVP5150A video decoder driver"); +MODULE_DESCRIPTION("Texas Instruments TVP5150A/TVP5150AM1/TVP5151 video decoder driver"); MODULE_AUTHOR("Mauro Carvalho Chehab"); MODULE_LICENSE("GPL"); @@ -35,6 +38,11 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)"); struct tvp5150 { struct v4l2_subdev sd; +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_pad pads[DEMOD_NUM_PADS]; + struct media_entity input_ent[TVP5150_INPUT_NUM]; + struct media_pad input_pad[TVP5150_INPUT_NUM]; +#endif struct v4l2_ctrl_handler hdl; struct v4l2_rect rect; @@ -42,6 +50,11 @@ struct tvp5150 { u32 input; u32 output; int enable; + + u16 dev_id; + u16 rom_ver; + + enum v4l2_mbus_type mbus_type; }; static inline struct tvp5150 *to_tvp5150(struct v4l2_subdev *sd) @@ -246,8 +259,12 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd) int input = 0; int val; - if ((decoder->output & TVP5150_BLACK_SCREEN) || !decoder->enable) - input = 8; + /* Only tvp5150am1 and tvp5151 have signal generator support */ + if ((decoder->dev_id == 0x5150 && decoder->rom_ver == 0x0400) || + (decoder->dev_id == 0x5151 && decoder->rom_ver == 0x0100)) { + if (!decoder->enable) + input = 8; + } switch (decoder->input) { case TVP5150_COMPOSITE1: @@ -772,12 +789,17 @@ static int tvp5150_reset(struct v4l2_subdev *sd, u32 val) v4l2_ctrl_handler_setup(&decoder->hdl); tvp5150_set_std(sd, decoder->norm); + + if (decoder->mbus_type == V4L2_MBUS_PARALLEL) + tvp5150_write(sd, TVP5150_DATA_RATE_SEL, 0x40); + return 0; }; static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); + struct tvp5150 *decoder = to_tvp5150(sd); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: @@ -791,6 +813,9 @@ static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl) return 0; case V4L2_CID_HUE: tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->val); + case V4L2_CID_TEST_PATTERN: + decoder->enable = ctrl->val ? 
false : true; + tvp5150_selmux(sd); return 0; } return -EINVAL; @@ -818,17 +843,6 @@ static v4l2_std_id tvp5150_read_std(struct v4l2_subdev *sd) } } -static int tvp5150_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->pad || code->index) - return -EINVAL; - - code->code = MEDIA_BUS_FMT_UYVY8_2X8; - return 0; -} - static int tvp5150_fill_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *format) @@ -844,10 +858,10 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd, tvp5150_reset(sd, 0); f->width = decoder->rect.width; - f->height = decoder->rect.height; + f->height = decoder->rect.height / 2; f->code = MEDIA_BUS_FMT_UYVY8_2X8; - f->field = V4L2_FIELD_SEQ_TB; + f->field = V4L2_FIELD_ALTERNATE; f->colorspace = V4L2_COLORSPACE_SMPTE170M; v4l2_dbg(1, debug, sd, "width = %d, height = %d\n", f->width, @@ -948,10 +962,110 @@ static int tvp5150_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) return 0; } +static int tvp5150_g_mbus_config(struct v4l2_subdev *sd, + struct v4l2_mbus_config *cfg) +{ + struct tvp5150 *decoder = to_tvp5150(sd); + + cfg->type = decoder->mbus_type; + cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING + | V4L2_MBUS_FIELD_EVEN_LOW | V4L2_MBUS_DATA_ACTIVE_HIGH; + + return 0; +} + +/**************************************************************************** + V4L2 subdev pad ops + ****************************************************************************/ +static int tvp5150_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + if (code->pad || code->index) + return -EINVAL; + + code->code = MEDIA_BUS_FMT_UYVY8_2X8; + return 0; +} + +static int tvp5150_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct tvp5150 *decoder = to_tvp5150(sd); + + if (fse->index >= 8 || fse->code != MEDIA_BUS_FMT_UYVY8_2X8) + return -EINVAL; + + fse->code = MEDIA_BUS_FMT_UYVY8_2X8; + fse->min_width = decoder->rect.width; + fse->max_width = decoder->rect.width; + fse->min_height = decoder->rect.height / 2; + fse->max_height = decoder->rect.height / 2; + + return 0; +} + +/**************************************************************************** + Media entity ops + ****************************************************************************/ + +static int tvp5150_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ +#ifdef CONFIG_MEDIA_CONTROLLER + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); + struct tvp5150 *decoder = to_tvp5150(sd); + int i; + + for (i = 0; i < TVP5150_INPUT_NUM; i++) { + if (remote->entity == &decoder->input_ent[i]) + break; + } + + /* Do nothing for entities that are not input connectors */ + if (i == TVP5150_INPUT_NUM) + return 0; + + decoder->input = i; + + tvp5150_selmux(sd); +#endif + + return 0; +} + +static const struct media_entity_operations tvp5150_sd_media_ops = { + .link_setup = tvp5150_link_setup, +}; + /**************************************************************************** I2C Command ****************************************************************************/ +static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable) +{ + struct tvp5150 *decoder = to_tvp5150(sd); + /* Output format: 8-bit ITU-R BT.656 with embedded syncs */ + int val = 0x09; + + /* Output format: 8-bit 
4:2:2 YUV with discrete sync */ + if (decoder->mbus_type == V4L2_MBUS_PARALLEL) + val = 0x0d; + + /* Initializes TVP5150 to its default values */ + /* # set PCLK (27MHz) */ + tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00); + + if (enable) + tvp5150_write(sd, TVP5150_MISC_CTL, val); + else + tvp5150_write(sd, TVP5150_MISC_CTL, 0x00); + + return 0; +} + static int tvp5150_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { @@ -959,6 +1073,12 @@ static int tvp5150_s_routing(struct v4l2_subdev *sd, decoder->input = input; decoder->output = output; + + if (output == TVP5150_BLACK_SCREEN) + decoder->enable = false; + else + decoder->enable = true; + tvp5150_selmux(sd); return 0; } @@ -1052,6 +1172,42 @@ static int tvp5150_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) return 0; } +static int tvp5150_registered_async(struct v4l2_subdev *sd) +{ +#ifdef CONFIG_MEDIA_CONTROLLER + struct tvp5150 *decoder = to_tvp5150(sd); + int ret = 0; + int i; + + for (i = 0; i < TVP5150_INPUT_NUM; i++) { + struct media_entity *input = &decoder->input_ent[i]; + struct media_pad *pad = &decoder->input_pad[i]; + + if (!input->name) + continue; + + decoder->input_pad[i].flags = MEDIA_PAD_FL_SOURCE; + + ret = media_entity_pads_init(input, 1, pad); + if (ret < 0) + return ret; + + ret = media_device_register_entity(sd->v4l2_dev->mdev, input); + if (ret < 0) + return ret; + + ret = media_create_pad_link(input, 0, &sd->entity, + DEMOD_PAD_IF_INPUT, 0); + if (ret < 0) { + media_device_unregister_entity(input); + return ret; + } + } +#endif + + return 0; +} + /* ----------------------------------------------------------------------- */ static const struct v4l2_ctrl_ops tvp5150_ctrl_ops = { @@ -1065,6 +1221,7 @@ static const struct v4l2_subdev_core_ops tvp5150_core_ops = { .g_register = tvp5150_g_register, .s_register = tvp5150_s_register, #endif + .registered_async = tvp5150_registered_async, }; static const struct v4l2_subdev_tuner_ops tvp5150_tuner_ops = { @@ -1073,10 +1230,12 @@ static const struct v4l2_subdev_tuner_ops tvp5150_tuner_ops = { static const struct v4l2_subdev_video_ops tvp5150_video_ops = { .s_std = tvp5150_s_std, + .s_stream = tvp5150_s_stream, .s_routing = tvp5150_s_routing, .s_crop = tvp5150_s_crop, .g_crop = tvp5150_g_crop, .cropcap = tvp5150_cropcap, + .g_mbus_config = tvp5150_g_mbus_config, }; static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = { @@ -1088,6 +1247,7 @@ static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = { static const struct v4l2_subdev_pad_ops tvp5150_pad_ops = { .enum_mbus_code = tvp5150_enum_mbus_code, + .enum_frame_size = tvp5150_enum_frame_size, .set_fmt = tvp5150_fill_fmt, .get_fmt = tvp5150_fill_fmt, }; @@ -1105,63 +1265,239 @@ static const struct v4l2_subdev_ops tvp5150_ops = { I2C Client & Driver ****************************************************************************/ +static int tvp5150_detect_version(struct tvp5150 *core) +{ + struct v4l2_subdev *sd = &core->sd; + struct i2c_client *c = v4l2_get_subdevdata(sd); + unsigned int i; + u8 regs[4]; + int res; + + /* + * Read consequent registers - TVP5150_MSB_DEV_ID, TVP5150_LSB_DEV_ID, + * TVP5150_ROM_MAJOR_VER, TVP5150_ROM_MINOR_VER + */ + for (i = 0; i < 4; i++) { + res = tvp5150_read(sd, TVP5150_MSB_DEV_ID + i); + if (res < 0) + return res; + regs[i] = res; + } + + core->dev_id = (regs[0] << 8) | regs[1]; + core->rom_ver = (regs[2] << 8) | regs[3]; + + v4l2_info(sd, "tvp%04x (%u.%u) chip found @ 0x%02x (%s)\n", + core->dev_id, regs[2], regs[3], c->addr << 1, + c->adapter->name); + 
+ if (core->dev_id == 0x5150 && core->rom_ver == 0x0321) { + v4l2_info(sd, "tvp5150a detected.\n"); + } else if (core->dev_id == 0x5150 && core->rom_ver == 0x0400) { + v4l2_info(sd, "tvp5150am1 detected.\n"); + + /* ITU-T BT.656.4 timing */ + tvp5150_write(sd, TVP5150_REV_SELECT, 0); + } else if (core->dev_id == 0x5151 && core->rom_ver == 0x0100) { + v4l2_info(sd, "tvp5151 detected.\n"); + } else { + v4l2_info(sd, "*** unknown tvp%04x chip detected.\n", + core->dev_id); + } + + return 0; +} + +static int tvp5150_init(struct i2c_client *c) +{ + struct gpio_desc *pdn_gpio; + struct gpio_desc *reset_gpio; + + pdn_gpio = devm_gpiod_get_optional(&c->dev, "pdn", GPIOD_OUT_HIGH); + if (IS_ERR(pdn_gpio)) + return PTR_ERR(pdn_gpio); + + if (pdn_gpio) { + gpiod_set_value_cansleep(pdn_gpio, 0); + /* Delay time between power supplies active and reset */ + msleep(20); + } + + reset_gpio = devm_gpiod_get_optional(&c->dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(reset_gpio)) + return PTR_ERR(reset_gpio); + + if (reset_gpio) { + /* RESETB pulse duration */ + ndelay(500); + gpiod_set_value_cansleep(reset_gpio, 0); + /* Delay time between end of reset to I2C active */ + usleep_range(200, 250); + } + + return 0; +} + +static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np) +{ + struct v4l2_of_endpoint bus_cfg; + struct device_node *ep; +#ifdef CONFIG_MEDIA_CONTROLLER + struct device_node *connectors, *child; + struct media_entity *input; + const char *name; + u32 input_type; +#endif + unsigned int flags; + int ret = 0; + + ep = of_graph_get_next_endpoint(np, NULL); + if (!ep) + return -EINVAL; + + ret = v4l2_of_parse_endpoint(ep, &bus_cfg); + if (ret) + goto err; + + flags = bus_cfg.bus.parallel.flags; + + if (bus_cfg.bus_type == V4L2_MBUS_PARALLEL && + !(flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH && + flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH && + flags & V4L2_MBUS_FIELD_EVEN_LOW)) { + ret = -EINVAL; + goto err; + } + + decoder->mbus_type = bus_cfg.bus_type; + +#ifdef CONFIG_MEDIA_CONTROLLER + connectors = of_get_child_by_name(np, "connectors"); + + if (!connectors) + goto err; + + for_each_available_child_of_node(connectors, child) { + ret = of_property_read_u32(child, "input", &input_type); + if (ret) { + v4l2_err(&decoder->sd, + "missing type property in node %s\n", + child->name); + goto err_connector; + } + + if (input_type > TVP5150_INPUT_NUM) { + ret = -EINVAL; + goto err_connector; + } + + input = &decoder->input_ent[input_type]; + + /* Each input connector can only be defined once */ + if (input->name) { + v4l2_err(&decoder->sd, + "input %s with same type already exists\n", + input->name); + ret = -EINVAL; + goto err_connector; + } + + switch (input_type) { + case TVP5150_COMPOSITE0: + case TVP5150_COMPOSITE1: + input->function = MEDIA_ENT_F_CONN_COMPOSITE; + break; + case TVP5150_SVIDEO: + input->function = MEDIA_ENT_F_CONN_SVIDEO; + break; + } + + input->flags = MEDIA_ENT_FL_CONNECTOR; + + ret = of_property_read_string(child, "label", &name); + if (ret < 0) { + v4l2_err(&decoder->sd, + "missing label property in node %s\n", + child->name); + goto err_connector; + } + + input->name = name; + } + +err_connector: + of_node_put(connectors); +#endif +err: + of_node_put(ep); + return ret; +} + +static const char * const tvp5150_test_patterns[2] = { + "Disabled", + "Black screen" +}; + static int tvp5150_probe(struct i2c_client *c, const struct i2c_device_id *id) { struct tvp5150 *core; struct v4l2_subdev *sd; - int tvp5150_id[4]; - int i, res; + struct device_node *np = c->dev.of_node; + int 
res; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(c->adapter, I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) return -EIO; + res = tvp5150_init(c); + if (res) + return res; + core = devm_kzalloc(&c->dev, sizeof(*core), GFP_KERNEL); if (!core) return -ENOMEM; + sd = &core->sd; - v4l2_i2c_subdev_init(sd, c, &tvp5150_ops); - /* - * Read consequent registers - TVP5150_MSB_DEV_ID, TVP5150_LSB_DEV_ID, - * TVP5150_ROM_MAJOR_VER, TVP5150_ROM_MINOR_VER - */ - for (i = 0; i < 4; i++) { - res = tvp5150_read(sd, TVP5150_MSB_DEV_ID + i); - if (res < 0) + if (IS_ENABLED(CONFIG_OF) && np) { + res = tvp5150_parse_dt(core, np); + if (res) { + v4l2_err(sd, "DT parsing error: %d\n", res); return res; - tvp5150_id[i] = res; + } + } else { + /* Default to BT.656 embedded sync */ + core->mbus_type = V4L2_MBUS_BT656; } - v4l_info(c, "chip found @ 0x%02x (%s)\n", - c->addr << 1, c->adapter->name); + v4l2_i2c_subdev_init(sd, c, &tvp5150_ops); + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - if (tvp5150_id[2] == 4 && tvp5150_id[3] == 0) { /* Is TVP5150AM1 */ - v4l2_info(sd, "tvp%02x%02xam1 detected.\n", - tvp5150_id[0], tvp5150_id[1]); +#if defined(CONFIG_MEDIA_CONTROLLER) + core->pads[DEMOD_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK; + core->pads[DEMOD_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE; + core->pads[DEMOD_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE; - /* ITU-T BT.656.4 timing */ - tvp5150_write(sd, TVP5150_REV_SELECT, 0); - } else { - /* Is TVP5150A */ - if (tvp5150_id[2] == 3 || tvp5150_id[3] == 0x21) { - v4l2_info(sd, "tvp%02x%02xa detected.\n", - tvp5150_id[0], tvp5150_id[1]); - } else { - v4l2_info(sd, "*** unknown tvp%02x%02x chip detected.\n", - tvp5150_id[0], tvp5150_id[1]); - v4l2_info(sd, "*** Rom ver is %d.%d\n", - tvp5150_id[2], tvp5150_id[3]); - } - } + sd->entity.function = MEDIA_ENT_F_ATV_DECODER; + + res = media_entity_pads_init(&sd->entity, DEMOD_NUM_PADS, core->pads); + if (res < 0) + return res; + + sd->entity.ops = &tvp5150_sd_media_ops; +#endif + + res = tvp5150_detect_version(core); + if (res < 0) + return res; core->norm = V4L2_STD_ALL; /* Default is autodetect */ core->input = TVP5150_COMPOSITE1; - core->enable = 1; + core->enable = true; - v4l2_ctrl_handler_init(&core->hdl, 4); + v4l2_ctrl_handler_init(&core->hdl, 5); v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops, @@ -1170,6 +1506,13 @@ static int tvp5150_probe(struct i2c_client *c, V4L2_CID_SATURATION, 0, 255, 1, 128); v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops, V4L2_CID_HUE, -128, 127, 1, 0); + v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops, + V4L2_CID_PIXEL_RATE, 27000000, + 27000000, 1, 27000000); + v4l2_ctrl_new_std_menu_items(&core->hdl, &tvp5150_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(tvp5150_test_patterns), + 0, 0, tvp5150_test_patterns); sd->ctrl_handler = &core->hdl; if (core->hdl.error) { res = core->hdl.error; @@ -1221,8 +1564,17 @@ static const struct i2c_device_id tvp5150_id[] = { }; MODULE_DEVICE_TABLE(i2c, tvp5150_id); +#if IS_ENABLED(CONFIG_OF) +static const struct of_device_id tvp5150_of_match[] = { + { .compatible = "ti,tvp5150", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, tvp5150_of_match); +#endif + static struct i2c_driver tvp5150_driver = { .driver = { + .of_match_table = of_match_ptr(tvp5150_of_match), .name = "tvp5150", }, .probe = tvp5150_probe, diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c index 83c79fa5f61d..4df640c3aa40 100644 
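The tvp5150 black-screen generator is now reachable as a standard V4L2_CID_TEST_PATTERN menu control ("Disabled" / "Black screen") instead of being tied to the TVP5150_BLACK_SCREEN output routing. A user-space sketch that queries and sets it; the device node and the menu index are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* assumed node */
	struct v4l2_queryctrl qc;
	struct v4l2_control ctrl;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&qc, 0, sizeof(qc));
	qc.id = V4L2_CID_TEST_PATTERN;
	if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0)
		printf("test pattern control: %s (%d..%d)\n",
		       (char *)qc.name, qc.minimum, qc.maximum);

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_TEST_PATTERN;
	ctrl.value = 1;		/* index 1 = "Black screen" in this driver */
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");

	close(fd);
	return 0;
}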
--- a/drivers/media/i2c/tvp7002.c +++ b/drivers/media/i2c/tvp7002.c @@ -894,7 +894,7 @@ static struct tvp7002_config * tvp7002_get_pdata(struct i2c_client *client) { struct v4l2_of_endpoint bus_cfg; - struct tvp7002_config *pdata; + struct tvp7002_config *pdata = NULL; struct device_node *endpoint; unsigned int flags; @@ -905,11 +905,13 @@ tvp7002_get_pdata(struct i2c_client *client) if (!endpoint) return NULL; + if (v4l2_of_parse_endpoint(endpoint, &bus_cfg)) + goto done; + pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) goto done; - v4l2_of_parse_endpoint(endpoint, &bus_cfg); flags = bus_cfg.bus.parallel.flags; if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c index 4b564f17f618..90b693f4e2ab 100644 --- a/drivers/media/i2c/vpx3220.c +++ b/drivers/media/i2c/vpx3220.c @@ -124,7 +124,7 @@ static int vpx3220_fp_write(struct v4l2_subdev *sd, u8 fpaddr, u16 data) return 0; } -static u16 vpx3220_fp_read(struct v4l2_subdev *sd, u16 fpaddr) +static int vpx3220_fp_read(struct v4l2_subdev *sd, u16 fpaddr) { struct i2c_client *client = v4l2_get_subdevdata(sd); s16 data; diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index 7dae0ac0f3ae..5ebb3cd31345 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -55,7 +55,11 @@ static int media_device_get_info(struct media_device *dev, memset(&info, 0, sizeof(info)); - strlcpy(info.driver, dev->dev->driver->name, sizeof(info.driver)); + if (dev->driver_name[0]) + strlcpy(info.driver, dev->driver_name, sizeof(info.driver)); + else + strlcpy(info.driver, dev->dev->driver->name, sizeof(info.driver)); + strlcpy(info.model, dev->model, sizeof(info.model)); strlcpy(info.serial, dev->serial, sizeof(info.serial)); strlcpy(info.bus_info, dev->bus_info, sizeof(info.bus_info)); @@ -234,7 +238,6 @@ static long media_device_setup_link(struct media_device *mdev, return ret; } -#if 0 /* Let's postpone it to Kernel 4.6 */ static long __media_device_get_topology(struct media_device *mdev, struct media_v2_topology *topo) { @@ -390,7 +393,6 @@ static long media_device_get_topology(struct media_device *mdev, return 0; } -#endif static long media_device_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) @@ -424,14 +426,13 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd, mutex_unlock(&dev->graph_mutex); break; -#if 0 /* Let's postpone it to Kernel 4.6 */ case MEDIA_IOC_G_TOPOLOGY: mutex_lock(&dev->graph_mutex); ret = media_device_get_topology(dev, (struct media_v2_topology __user *)arg); mutex_unlock(&dev->graph_mutex); break; -#endif + default: ret = -ENOIOCTLCMD; } @@ -480,9 +481,7 @@ static long media_device_compat_ioctl(struct file *filp, unsigned int cmd, case MEDIA_IOC_DEVICE_INFO: case MEDIA_IOC_ENUM_ENTITIES: case MEDIA_IOC_SETUP_LINK: -#if 0 /* Let's postpone it to Kernel 4.6 */ case MEDIA_IOC_G_TOPOLOGY: -#endif return media_device_ioctl(filp, cmd, arg); case MEDIA_IOC_ENUM_LINKS32: diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c index cea35bf20011..29409f440f1c 100644 --- a/drivers/media/media-devnode.c +++ b/drivers/media/media-devnode.c @@ -181,6 +181,7 @@ static int media_open(struct inode *inode, struct file *filp) ret = mdev->fops->open(filp); if (ret) { put_device(&mdev->dev); + filp->private_data = NULL; return ret; } } diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c index e89d85a7d31b..f2e43603d6d2 100644 --- a/drivers/media/media-entity.c 
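With the "#if 0 /* Let's postpone it to Kernel 4.6 */" guards dropped above, MEDIA_IOC_G_TOPOLOGY is now handled by media-device.c. It is a two-pass ioctl: a first call with the pointer fields left at zero returns only the element counts, a second call with user buffers fills the arrays. A sketch of the entity-listing half; the /dev/media0 path is assumed, and real code should also recheck topology_version between the two calls:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/media.h>

int main(void)
{
	int fd = open("/dev/media0", O_RDWR);	/* assumed node */
	struct media_v2_topology topo;
	struct media_v2_entity *ents;
	unsigned int i;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Pass 1: all pointers zero, only the counts are filled in. */
	memset(&topo, 0, sizeof(topo));
	if (ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo) < 0) {
		perror("MEDIA_IOC_G_TOPOLOGY");
		return 1;
	}

	ents = calloc(topo.num_entities, sizeof(*ents));
	if (!ents)
		return 1;

	/* Pass 2: fetch the entity array (pads and links left out here). */
	topo.ptr_entities = (__u64)(uintptr_t)ents;
	if (ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo) < 0) {
		perror("MEDIA_IOC_G_TOPOLOGY");
		return 1;
	}

	for (i = 0; i < topo.num_entities; i++)
		printf("entity %u: %s\n", ents[i].id, ents[i].name);

	free(ents);
	close(fd);
	return 0;
}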
+++ b/drivers/media/media-entity.c @@ -452,9 +452,12 @@ error: media_entity_graph_walk_start(graph, entity_err); while ((entity_err = media_entity_graph_walk_next(graph))) { - entity_err->stream_count--; - if (entity_err->stream_count == 0) - entity_err->pipe = NULL; + /* don't let the stream_count go negative */ + if (entity->stream_count > 0) { + entity_err->stream_count--; + if (entity_err->stream_count == 0) + entity_err->pipe = NULL; + } /* * We haven't increased stream_count further than this @@ -486,9 +489,12 @@ void media_entity_pipeline_stop(struct media_entity *entity) media_entity_graph_walk_start(graph, entity); while ((entity = media_entity_graph_walk_next(graph))) { - entity->stream_count--; - if (entity->stream_count == 0) - entity->pipe = NULL; + /* don't let the stream_count go negative */ + if (entity->stream_count > 0) { + entity->stream_count--; + if (entity->stream_count == 0) + entity->pipe = NULL; + } } if (!--pipe->streaming_count) diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c index 8b5e0b3a92a0..4cac1fc233f2 100644 --- a/drivers/media/pci/b2c2/flexcop-pci.c +++ b/drivers/media/pci/b2c2/flexcop-pci.c @@ -39,7 +39,7 @@ MODULE_PARM_DESC(debug, #define DRIVER_VERSION "0.1" #define DRIVER_NAME "flexcop-pci" -#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>" +#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>" struct flexcop_pci { struct pci_dev *pdev; diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index 9400e996087b..2c412377507b 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -186,7 +186,7 @@ MODULE_VERSION(BTTV_VERSION); static ssize_t show_card(struct device *cd, struct device_attribute *attr, char *buf) { - struct video_device *vfd = container_of(cd, struct video_device, dev); + struct video_device *vfd = to_video_device(cd); struct bttv *btv = video_get_drvdata(vfd); return sprintf(buf, "%d\n", btv ? btv->c.type : UNSET); } @@ -1726,22 +1726,15 @@ static int bttv_s_std(struct file *file, void *priv, v4l2_std_id id) struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; unsigned int i; - int err = 0; for (i = 0; i < BTTV_TVNORMS; i++) if (id & bttv_tvnorms[i].v4l2_id) break; - if (i == BTTV_TVNORMS) { - err = -EINVAL; - goto err; - } - + if (i == BTTV_TVNORMS) + return -EINVAL; btv->std = id; set_tvnorm(btv, i); - -err: - - return err; + return 0; } static int bttv_g_std(struct file *file, void *priv, v4l2_std_id *id) @@ -1770,12 +1763,9 @@ static int bttv_enum_input(struct file *file, void *priv, { struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; - int rc = 0; - if (i->index >= bttv_tvcards[btv->c.type].video_inputs) { - rc = -EINVAL; - goto err; - } + if (i->index >= bttv_tvcards[btv->c.type].video_inputs) + return -EINVAL; i->type = V4L2_INPUT_TYPE_CAMERA; i->audioset = 0; @@ -1799,10 +1789,7 @@ static int bttv_enum_input(struct file *file, void *priv, } i->std = BTTV_NORMS; - -err: - - return rc; + return 0; } static int bttv_g_input(struct file *file, void *priv, unsigned int *i) diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c index 4a90eee5e3bb..35bc9b2287b4 100644 --- a/drivers/media/pci/bt8xx/dst.c +++ b/drivers/media/pci/bt8xx/dst.c @@ -1688,9 +1688,9 @@ static int dst_get_tuning_algo(struct dvb_frontend *fe) return dst_algo ? 
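The media-entity hunks above keep stream_count from going negative, both in the error rollback of media_entity_pipeline_start() and in media_entity_pipeline_stop(); drivers are still expected to keep the two calls balanced. The usual pattern around streaming, as a sketch with stand-in hardware hooks:

#include <media/media-entity.h>

struct foo_video {
	struct media_entity entity;
	struct media_pipeline pipe;
};

/* Stand-ins for the driver's real hardware start/stop paths. */
static int foo_hw_start(struct foo_video *vid) { return 0; }
static void foo_hw_stop(struct foo_video *vid) { }

static int foo_start_streaming(struct foo_video *vid)
{
	int ret;

	/* Marks every entity reachable from this one as streaming and
	 * validates the enabled links along the way. */
	ret = media_entity_pipeline_start(&vid->entity, &vid->pipe);
	if (ret < 0)
		return ret;

	ret = foo_hw_start(vid);
	if (ret < 0)
		media_entity_pipeline_stop(&vid->entity);

	return ret;
}

static void foo_stop_streaming(struct foo_video *vid)
{
	foo_hw_stop(vid);
	media_entity_pipeline_stop(&vid->entity);
}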
DVBFE_ALGO_HW : DVBFE_ALGO_SW; } -static int dst_get_frontend(struct dvb_frontend *fe) +static int dst_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct dst_state *state = fe->demodulator_priv; p->frequency = state->decode_freq; diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c index d407244fd1bc..e69d338ab9be 100644 --- a/drivers/media/pci/bt8xx/dvb-bt8xx.c +++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c @@ -318,7 +318,7 @@ static int microtune_mt7202dtf_request_firmware(struct dvb_frontend* fe, const s return request_firmware(fw, name, &bt->bt->dev->dev); } -static struct sp887x_config microtune_mt7202dtf_config = { +static const struct sp887x_config microtune_mt7202dtf_config = { .demod_address = 0x70, .request_firmware = microtune_mt7202dtf_request_firmware, }; @@ -458,7 +458,7 @@ static void or51211_sleep(struct dvb_frontend * fe) bttv_write_gpio(bt->bttv_nr, 0x0001, 0x0000); } -static struct or51211_config or51211_config = { +static const struct or51211_config or51211_config = { .demod_address = 0x15, .request_firmware = or51211_request_firmware, .setmode = or51211_setmode, diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c index 80319bb73d94..5131c9f555fb 100644 --- a/drivers/media/pci/cx23885/cx23885-dvb.c +++ b/drivers/media/pci/cx23885/cx23885-dvb.c @@ -2301,7 +2301,8 @@ static int dvb_register(struct cx23885_tsport *port) /* register everything */ ret = vb2_dvb_register_bus(&port->frontends, THIS_MODULE, port, - &dev->pci->dev, adapter_nr, mfe_shared); + &dev->pci->dev, NULL, + adapter_nr, mfe_shared); if (ret) goto frontend_detach; diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c index afb20756d7a5..851d2a9caed3 100644 --- a/drivers/media/pci/cx88/cx88-dvb.c +++ b/drivers/media/pci/cx88/cx88-dvb.c @@ -1642,7 +1642,8 @@ static int dvb_register(struct cx8802_dev *dev) /* register everything */ res = vb2_dvb_register_bus(&dev->frontends, THIS_MODULE, dev, - &dev->pci->dev, adapter_nr, mfe_shared); + &dev->pci->dev, NULL, adapter_nr, + mfe_shared); if (res) goto frontend_detach; return res; diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c index 9d5b314142f1..6e995ef8c37e 100644 --- a/drivers/media/pci/ddbridge/ddbridge-core.c +++ b/drivers/media/pci/ddbridge/ddbridge-core.c @@ -690,7 +690,7 @@ static int tuner_attach_stv6110(struct ddb_input *input, int type) struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900; struct stv6110x_config *tunerconf = (input->nr & 1) ? 
&stv6110b : &stv6110a; - struct stv6110x_devctl *ctl; + const struct stv6110x_devctl *ctl; ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c); if (!ctl) { diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c index 525ebfefeee8..2b667b315913 100644 --- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c +++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c @@ -462,8 +462,8 @@ static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev, } if (vb2_dvb_register_bus(&ndev->frontends[num], - THIS_MODULE, NULL, - &ndev->pci_dev->dev, adapter_nr, 1)) { + THIS_MODULE, NULL, + &ndev->pci_dev->dev, NULL, adapter_nr, 1)) { dev_dbg(&ndev->pci_dev->dev, "%s(): unable to register DVB bus %d\n", __func__, num); @@ -771,10 +771,9 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev, /* allocate device context */ ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); - if (!ndev) goto dev_alloc_err; - memset(ndev, 0, sizeof(*ndev)); + ndev->old_fw = old_firmware; ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME); if (!ndev->wq) { diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c index 039bed3cc919..4e783a68bf4a 100644 --- a/drivers/media/pci/ngene/ngene-cards.c +++ b/drivers/media/pci/ngene/ngene-cards.c @@ -57,7 +57,7 @@ static int tuner_attach_stv6110(struct ngene_channel *chan) chan->dev->card_info->fe_config[chan->number]; struct stv6110x_config *tunerconf = (struct stv6110x_config *) chan->dev->card_info->tuner_config[chan->number]; - struct stv6110x_devctl *ctl; + const struct stv6110x_devctl *ctl; /* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */ if (chan->number < 2) diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c index 29d2094c42a0..9a2fdc78eb85 100644 --- a/drivers/media/pci/saa7134/saa7134-cards.c +++ b/drivers/media/pci/saa7134/saa7134-cards.c @@ -36,17 +36,23 @@ #include "xc5000.h" #include "s5h1411.h" -/* commly used strings */ -static char name_mute[] = "mute"; -static char name_radio[] = "Radio"; -static char name_tv[] = "Television"; -static char name_tv_mono[] = "TV (mono only)"; -static char name_comp[] = "Composite"; -static char name_comp1[] = "Composite1"; -static char name_comp2[] = "Composite2"; -static char name_comp3[] = "Composite3"; -static char name_comp4[] = "Composite4"; -static char name_svideo[] = "S-Video"; +/* Input names */ +const char * const saa7134_input_name[] = { + [SAA7134_INPUT_MUTE] = "mute", + [SAA7134_INPUT_RADIO] = "Radio", + [SAA7134_INPUT_TV] = "Television", + [SAA7134_INPUT_TV_MONO] = "TV (mono only)", + [SAA7134_INPUT_COMPOSITE] = "Composite", + [SAA7134_INPUT_COMPOSITE0] = "Composite0", + [SAA7134_INPUT_COMPOSITE1] = "Composite1", + [SAA7134_INPUT_COMPOSITE2] = "Composite2", + [SAA7134_INPUT_COMPOSITE3] = "Composite3", + [SAA7134_INPUT_COMPOSITE4] = "Composite4", + [SAA7134_INPUT_SVIDEO] = "S-Video", + [SAA7134_INPUT_SVIDEO0] = "S-Video0", + [SAA7134_INPUT_SVIDEO1] = "S-Video1", + [SAA7134_INPUT_COMPOSITE_OVER_SVIDEO] = "Composite over S-Video", +}; /* ------------------------------------------------------------------ */ /* board config info */ @@ -69,7 +75,7 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .inputs = {{ - .name = "default", + .type = SAA7134_INPUT_COMPOSITE, .vmux = 0, .amux = LINE1, }}, @@ -84,22 +90,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_comp1, + .type = 
SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, - .tv = 1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -114,40 +118,38 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 0xe000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, .gpio = 0x8000, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, .gpio = 0x0000, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, .gpio = 0x4000, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, .gpio = 0x4000, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, .gpio = 0x4000, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x2000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio = 0x8000, }, @@ -163,34 +165,33 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 0xe000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, .gpio = 0x0000, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, .gpio = 0x4000, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, .gpio = 0x4000, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, .gpio = 0x4000, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x2000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE2, .gpio = 0x8000, }, @@ -205,20 +206,19 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, /* Composite signal on S-Video input */ + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE2, },{ - .name = name_comp2, /* Composite input */ + .type = SAA7134_INPUT_COMPOSITE, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, @@ -235,40 +235,38 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 0x1E000, /* Set GP16 and unused 15,14,13 to Output */ .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, .gpio = 0x10000, /* GP16=1 selects TV input */ - .tv = 1, },{ -/* .name = name_tv_mono, +/* .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, .gpio = 0x0000, - .tv = 1, },{ -*/ .name = name_comp1, /* Composite signal on S-Video input */ +*/ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE2, /* .gpio = 0x4000, */ },{ - .name = name_comp2, /* Composite input */ + .type = SAA7134_INPUT_COMPOSITE, .vmux = 3, .amux = LINE2, /* .gpio = 0x4000, */ },{ - .name = name_svideo, /* S-Video signal on S-Video input */ + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, /* .gpio = 0x4000, */ }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x00000, /* GP16=0 selects FM radio antenna */ }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio = 0x10000, }, @@ -285,40 +283,38 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0xe000, .inputs = { { - 
.name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, .gpio = 0x8000, - .tv = 1, }, { - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, .gpio = 0x0000, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, .gpio = 0x4000, }, { - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, .gpio = 0x4000, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, .gpio = 0x4000, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x2000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio = 0x8000, }, @@ -334,21 +330,20 @@ struct saa7134_board saa7134_boards[] = { .empress_addr = 0x20, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, .mpeg = SAA7134_MPEG_EMPRESS, @@ -364,21 +359,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -390,35 +384,33 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ /* workaround for problems with normal TV sound */ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, }, }, @@ -432,32 +424,30 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, - .tv = 1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = "CVid over SVid", + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -472,24 +462,23 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x820000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x20000, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x20000, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x20000, }}, .radio = { 
- .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x20000, }, @@ -504,20 +493,19 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE, .vmux = 4, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp2, /* CVideo over SVideo Connector */ + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE1, }} @@ -531,31 +519,29 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ /* workaround for problems with normal TV sound */ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -567,18 +553,17 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, }}, }, [SAA7134_BOARD_CINERGY600] = { @@ -590,25 +575,24 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 4, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp2, /* CVideo over SVideo Connector */ + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -622,25 +606,24 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, }, }, @@ -655,21 +638,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -681,18 +663,17 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_svideo, + .type 
= SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 4, .amux = LINE2, - .tv = 1, }}, }, [SAA7134_BOARD_ELSA_500TV] = { @@ -703,19 +684,17 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 7, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 8, .amux = TV, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 8, .amux = LINE2, - .tv = 1, }}, }, [SAA7134_BOARD_ELSA_700TV] = { @@ -726,21 +705,20 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 4, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 6, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 7, .amux = LINE1, }}, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, }, }, @@ -753,21 +731,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 4, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE2, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE1, }, }, @@ -780,29 +757,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x200000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, .gpio = 0x0000, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 4, .amux = LINE2, .gpio = 0x0000, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE2, .gpio = 0x0000, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x200000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .gpio = 0x0000, }, @@ -815,18 +791,17 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, }}, }, [SAA7134_BOARD_10MOONSTVMASTER] = { @@ -839,34 +814,33 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0xe000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, .gpio = 0x0000, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, .gpio = 0x4000, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, .gpio = 0x4000, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, .gpio = 0x4000, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x2000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE2, .gpio = 0x8000, }, @@ -881,23 +855,23 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .empress_addr = 0x20, .inputs = {{ - .name = 
name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 4, .amux = LINE1, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE1, },{ - .name = name_comp3, + .type = SAA7134_INPUT_COMPOSITE3, .vmux = 0, .amux = LINE1, },{ - .name = name_comp4, + .type = SAA7134_INPUT_COMPOSITE4, .vmux = 1, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, @@ -912,18 +886,17 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, }}, }, [SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUS] = { @@ -935,21 +908,20 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x06c00012, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x0ac20012, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, .gpio = 0x08c20012, - .tv = 1, }}, /* radio and probably mute is missing */ }, [SAA7134_BOARD_CRONOS_PLUS] = { @@ -968,23 +940,23 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0xcf00, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .gpio = 2 << 14, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 0, .gpio = 1 << 14, },{ - .name = name_comp3, + .type = SAA7134_INPUT_COMPOSITE3, .vmux = 0, .gpio = 0 << 14, },{ - .name = name_comp4, + .type = SAA7134_INPUT_COMPOSITE4, .vmux = 0, .gpio = 3 << 14, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .gpio = 2 << 14, }}, @@ -999,34 +971,33 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x03, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x00, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x02, }, { - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 0, .amux = LINE1, .gpio = 0x02, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x02, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE1, .gpio = 0x01, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio = 0x00, }, @@ -1041,18 +1012,17 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .empress_addr = 0x20, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }}, .mpeg = SAA7134_MPEG_EMPRESS, .video_out = CCIR656, @@ -1068,22 +1038,21 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 4, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE2, }}, .radio = { - .name = name_radio, + .type = 
SAA7134_INPUT_RADIO, .amux = LINE1, }, }, @@ -1096,20 +1065,19 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER | TDA9887_PORT2_INACTIVE, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 1, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, @@ -1123,21 +1091,20 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - .tv = 1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -1150,21 +1117,20 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - .tv = 1, }}, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, }, }, @@ -1177,16 +1143,15 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, @@ -1199,30 +1164,28 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = "CVid over SVid", + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -1234,30 +1197,28 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = "CVid over SVid", + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -1270,30 +1231,28 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, 
.amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = "CVid over SVid", + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -1306,30 +1265,28 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x200000, }, @@ -1343,10 +1300,10 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, }}, }, @@ -1360,10 +1317,9 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, } }, }, [SAA7134_BOARD_NOVAC_PRIMETV7133] = { @@ -1375,15 +1331,14 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, }}, }, @@ -1396,29 +1351,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, }, }, @@ -1432,29 +1386,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, }, { - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, }, }, @@ -1467,12 +1420,11 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 7, .amux = TV, - .tv = 1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux 
= 7, .amux = LINE1, }}, @@ -1486,21 +1438,20 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -1512,25 +1463,24 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 4, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp2, /* CVideo over SVideo Connector */ + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE1, }}, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE2, }, }, @@ -1544,29 +1494,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x808c0080, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x00080, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x00080, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2_LEFT, - .tv = 1, .gpio = 0x00080, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x80000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE2, .gpio = 0x40000, }, @@ -1580,21 +1529,20 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - .tv = 1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -1607,18 +1555,17 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, }}, }, [SAA7134_BOARD_EMPIRE_PCI_TV_RADIO_LE] = { @@ -1631,29 +1578,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x4000, .inputs = {{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, .gpio = 0x8000, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x8000, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE1, .gpio = 0x8000, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE1, .gpio = 0x8000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio =0x8000, } @@ -1672,29 +1618,28 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x03, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x00, },{ - .name = name_comp, + .type = 
SAA7134_INPUT_COMPOSITE, .vmux = 3, .amux = LINE1, .gpio = 0x02, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x02, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE1, .gpio = 0x01, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, .gpio = 0x00, }, @@ -1709,29 +1654,28 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 0x00300003, /* .gpiomask = 0x8c240003, */ .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x01, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, .gpio = 0x02, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE1, .gpio = 0x02, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x00300001, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio = 0x01, }, @@ -1745,21 +1689,20 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE1, }, }, @@ -1774,24 +1717,23 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x08000000, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x08000000, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x08000000, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x08000000, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x00000000, }, @@ -1805,21 +1747,19 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, @@ -1834,25 +1774,24 @@ struct saa7134_board saa7134_boards[] = { .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 4, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp2, /* CVideo over SVideo Connector */ + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -1866,29 +1805,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x1ce780, .inputs = {{ - .name = name_svideo, - .vmux = 0, /* CVideo over SVideo Connector - ok? 
*/ + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, + .vmux = 0, .amux = LINE1, .gpio = 0x008080, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x008080, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x008080, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x80000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE2, .gpio = 0x0c8000, }, @@ -1903,20 +1841,19 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER | TDA9887_PORT2_INACTIVE, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 1, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, @@ -1931,22 +1868,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, - .tv = 1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -1961,25 +1896,24 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, }, }, @@ -1995,26 +1929,25 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 0x00200000, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, .gpio = 0x200000, /* GPIO21=High for TV input */ - .tv = 1, },{ - .name = name_comp1, /* Composite signal on S-Video input */ + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE2, },{ - .name = name_comp2, /* Composite input */ + .type = SAA7134_INPUT_COMPOSITE, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, /* S-Video signal on S-Video input */ + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */ }, @@ -2028,11 +1961,11 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, @@ -2049,20 +1982,19 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, 
.amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, @@ -2075,16 +2007,15 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, @@ -2098,29 +2029,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x0700, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x000, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x200, /* gpio by DScaler */ },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 0, .amux = LINE1, .gpio = 0x200, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE1, .gpio = 0x100, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio = 0x000, }, @@ -2135,26 +2065,25 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x00200000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, .gpio = 0x200000, /* GPIO21=High for TV input */ - .tv = 1, },{ - .name = name_svideo, /* S-Video signal on S-Video input */ + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, },{ - .name = name_comp1, /* Composite signal on S-Video input */ + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE2, },{ - .name = name_comp2, /* Composite input */ + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */ }, @@ -2168,29 +2097,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = 0x60, .gpiomask = 0x8c1880, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 0, .amux = LINE1, .gpio = 0x800800, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x801000, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x800000, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x880000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE2, .gpio = 0x840000, }, @@ -2213,29 +2141,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = 0x60, .gpiomask = 0x0700, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x000, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x200, /* gpio by DScaler */ },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 0, .amux = LINE1, .gpio = 0x200, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE1, .gpio = 0x100, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio = 0x000, }, @@ -2248,30 +2175,28 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = 
LINE2, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, .radio = { - .name = name_radio, /* radio unconfirmed */ + .type = SAA7134_INPUT_RADIO, /* radio unconfirmed */ .amux = LINE2, }, }, @@ -2286,24 +2211,23 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 1 << 21, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, .gpio = 0x0000000, - .tv = 1, },{ - .name = name_comp1, /* Composite input */ + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, .gpio = 0x0000000, },{ - .name = name_svideo, /* S-Video input */ + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, .gpio = 0x0000000, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -2322,29 +2246,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr= ADDR_UNSET, .gpiomask = 0x00010003, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x01, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, .gpio = 0x02, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE2, .gpio = 0x02, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE1, .gpio = 0x00010003, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio = 0x01, }, @@ -2362,21 +2285,20 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -2392,34 +2314,33 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00200003, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x00200003, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, .gpio = 0x00200003, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x00200003, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x00200003, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x00200003, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio = 0x00200003, }, @@ -2434,16 +2355,15 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, @@ -2458,16 +2378,15 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE2, },{ - .name = name_svideo, + .type = 
SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, @@ -2481,11 +2400,11 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, @@ -2499,27 +2418,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .empress_addr = 0x21, .inputs = {{ - .name = "Composite 0", + .type = SAA7134_INPUT_COMPOSITE0, .vmux = 0, .amux = LINE1, },{ - .name = "Composite 1", + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE2, },{ - .name = "Composite 2", + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 2, .amux = LINE1, },{ - .name = "Composite 3", + .type = SAA7134_INPUT_COMPOSITE3, .vmux = 3, .amux = LINE2, },{ - .name = "S-Video 0", + .type = SAA7134_INPUT_SVIDEO0, + .vmux = 8, .amux = LINE1, },{ - .name = "S-Video 1", + .type = SAA7134_INPUT_SVIDEO1, .vmux = 9, .amux = LINE2, }}, @@ -2538,27 +2458,27 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = "Composite 0", + .type = SAA7134_INPUT_COMPOSITE0, .vmux = 0, .amux = LINE1, },{ - .name = "Composite 1", + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE2, },{ - .name = "Composite 2", + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 2, .amux = LINE1, },{ - .name = "Composite 3", + .type = SAA7134_INPUT_COMPOSITE3, .vmux = 3, .amux = LINE2, },{ - .name = "S-Video 0", + .type = SAA7134_INPUT_SVIDEO0, .vmux = 8, .amux = LINE1, },{ - .name = "S-Video 1", + .type = SAA7134_INPUT_SVIDEO1, .vmux = 9, .amux = LINE2, }}, @@ -2572,20 +2492,19 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, /* Composite signal on S-Video input */ + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE2, },{ - .name = name_comp2, /* Composite input */ + .type = SAA7134_INPUT_COMPOSITE, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, @@ -2604,11 +2523,11 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, @@ -2622,16 +2541,15 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE1, }}, @@ -2645,25 +2563,24 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x080200000, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 4, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE2, }, { - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 0, .amux = LINE2, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -2678,29 +2595,28 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 1 << 21, .mpeg = 
SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x0000000, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, .gpio = 0x0200000, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 0, .amux = LINE2, .gpio = 0x0200000, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, .gpio = 0x0200000, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -2717,21 +2633,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0xe880c0, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -2745,16 +2660,15 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, @@ -2770,21 +2684,20 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -2798,25 +2711,24 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 1 << 21, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE, .vmux = 3, .amux = LINE2, /* unconfirmed, taken from Philips driver */ },{ - .name = name_comp2, - .vmux = 0, /* untested, Composite over S-Video */ + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, + .vmux = 0, /* untested */ .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -2834,17 +2746,16 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x80200000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_svideo, /* NOT tested */ + .type = SAA7134_INPUT_SVIDEO, /* NOT tested */ .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -2861,26 +2772,25 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 0x00200000, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, /* Analog broadcast/cable TV */ + .type = SAA7134_INPUT_TV, /* Analog broadcast/cable TV */ .vmux = 1, .amux = TV, .gpio = 0x200000, /* GPIO21=High for TV input */ - .tv = 1, },{ - .name = name_svideo, /* S-Video signal on S-Video input */ + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, },{ - .name = name_comp1, /* Composite signal on S-Video input */ + .type = 
SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE2, },{ - .name = name_comp2, /* Composite input */ + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */ }, @@ -2894,11 +2804,11 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, @@ -2914,11 +2824,11 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_comp1, /* Composite input */ + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, /* S-Video signal on S-Video input */ + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, @@ -2933,10 +2843,9 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x00600000, /* Bit 21 0=Radio, Bit 22 0=TV */ .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x00200000, }}, }, @@ -2950,25 +2859,24 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .gpiomask = 1 << 21, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 0, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -2983,21 +2891,20 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .gpiomask = 1 << 21, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -3012,16 +2919,15 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, @@ -3052,17 +2958,16 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0xca60000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 4, .amux = TV, - .tv = 1, .gpio = 0x04a61000, },{ - .name = name_comp2, /* Composite SVIDEO (B/W if signal is carried with SVIDEO) */ + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 1, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 9, /* 9 is correct as S-VIDEO1 according to a169.inf! 
*/ .amux = LINE1, }}, @@ -3086,26 +2991,25 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x00600000, /* Bit 21 0=Radio, Bit 22 0=TV */ .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, .gpio = 0x200000, /* GPIO21=High for TV input */ - .tv = 1, },{ - .name = name_svideo, /* S-Video signal on S-Video input */ + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, },{ - .name = name_comp1, /* Composite signal on S-Video input */ + .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, .vmux = 0, .amux = LINE2, },{ - .name = name_comp2, /* Composite input */ + .type = SAA7134_INPUT_COMPOSITE, .vmux = 3, .amux = LINE2, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */ }, @@ -3121,40 +3025,38 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 0xe000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, .gpio = 0x8000, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, .gpio = 0x0000, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, .gpio = 0x4000, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, .gpio = 0x4000, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, .gpio = 0x4000, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x2000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio = 0x8000, }, @@ -3168,16 +3070,15 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, @@ -3193,11 +3094,11 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_comp1, /* Composite input */ + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_svideo, /* S-Video signal on S-Video input */ + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, @@ -3211,25 +3112,24 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, }, }, @@ -3244,21 +3144,20 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE1, }, }, @@ -3272,21 +3171,20 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT| 
TDA9887_PORT1_ACTIVE | TDA9887_PORT2_ACTIVE, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 4, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE1, }, }, @@ -3301,25 +3199,24 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x000200000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 4, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE2, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 0, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -3335,34 +3232,33 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x03, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x00, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, .gpio = 0x00, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, .gpio = 0x00, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, .gpio = 0x00, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x01, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, .gpio = 0x00, }, @@ -3378,16 +3274,15 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, }}, @@ -3405,22 +3300,21 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200100, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x0000100, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200100, }, @@ -3438,22 +3332,21 @@ struct saa7134_board saa7134_boards[] = { .ts_force_val = 1, .gpiomask = 0x0800100, /* GPIO 21 is an INPUT */ .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x0000100, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0800100, /* GPIO 23 HI for FM */ }, @@ -3470,22 +3363,21 @@ struct saa7134_board saa7134_boards[] = { .ts_type = SAA7134_MPEG_TS_SERIAL, .gpiomask = 0x0800100, /* GPIO 21 is an INPUT */ .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x0000100, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, }, { - 
.name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0800100, /* GPIO 23 HI for FM */ }, @@ -3499,16 +3391,15 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE1, }}, @@ -3523,33 +3414,31 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = 3, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 7, .amux = 4, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = 2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 0, .amux = 2, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, /* .gpio = 0x00300001,*/ .gpio = 0x20000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = 0, }, }, @@ -3562,32 +3451,30 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = 3, - .tv = 1, },{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 7, .amux = 4, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = 2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 0, .amux = 2, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x20000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = 0, }, }, @@ -3600,29 +3487,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x7000, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = 1, - .tv = 1, .gpio = 0x50000, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = 2, .gpio = 0x2000, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = 2, .gpio = 0x2000, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .vmux = 1, .amux = 1, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .gpio = 0xf000, .amux = 0, }, @@ -3635,26 +3521,25 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = 0x61, .radio_addr = 0x60, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .vmux = 1, .amux = LINE1, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, .gpio = 0x43000, }, @@ -3668,16 +3553,15 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE1, }}, @@ -3693,21 +3577,20 @@ struct saa7134_board saa7134_boards[] = 
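The hunks in this board table all apply one mechanical substitution: the per-input name string and the tv flag are dropped in favour of an enum saa7134_input_types value (the enum itself is added to saa7134.h near the end of the patch), and display names are resolved through a shared saa7134_input_name[] lookup table. A minimal sketch of that pattern follows; the table contents and the demo main() are illustrative only, not part of the patch.

/* Sketch: one enum plus one shared name table replaces a string in every entry. */
#include <stdio.h>

enum saa7134_input_types {
	SAA7134_NO_INPUT = 0,
	SAA7134_INPUT_TV,
	SAA7134_INPUT_COMPOSITE1,
	SAA7134_INPUT_SVIDEO,
	SAA7134_INPUT_RADIO,
};

/* Illustrative contents; the real table lives in the driver sources. */
static const char * const saa7134_input_name[] = {
	[SAA7134_NO_INPUT]         = "",
	[SAA7134_INPUT_TV]         = "Television",
	[SAA7134_INPUT_COMPOSITE1] = "Composite1",
	[SAA7134_INPUT_SVIDEO]     = "S-Video",
	[SAA7134_INPUT_RADIO]      = "Radio",
};

struct saa7134_input {
	enum saa7134_input_types type;	/* was: char *name, plus a tv:1 flag */
	unsigned int vmux;
	unsigned int gpio;
};

int main(void)
{
	struct saa7134_input in = { .type = SAA7134_INPUT_SVIDEO, .vmux = 8 };

	/* Callers now print saa7134_input_name[in.type] instead of in.name,
	 * and test in.type == SAA7134_INPUT_TV instead of the old tv flag. */
	printf("input: %s (vmux %u)\n", saa7134_input_name[in.type], in.vmux);
	return 0;
}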
{ .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -3721,16 +3604,15 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 1<<21, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE2, }}, @@ -3746,10 +3628,9 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x0200000, }}, }, @@ -3764,29 +3645,28 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 1 << 21, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x0000000, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, .gpio = 0x0200000, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 0, .amux = LINE2, .gpio = 0x0200000, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, .gpio = 0x0200000, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -3800,26 +3680,25 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 1 << 21, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x0000000, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, }, { - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 0, .amux = LINE2, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -3832,25 +3711,24 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 0, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, }, }, @@ -3864,24 +3742,23 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x7000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, .gpio = 0x0000, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x2000, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x2000, }}, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE2, .gpio = 0x3000, }, @@ -3896,10 +3773,9 @@ struct saa7134_board saa7134_boards[] = { .tda829x_conf = { .lna_cfg = TDA8290_LNA_OFF }, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, /* FIXME: analog tv 
untested */ + .type = SAA7134_INPUT_TV, /* FIXME: analog tv untested */ .vmux = 1, .amux = TV, - .tv = 1, }}, }, [SAA7134_BOARD_AVERMEDIA_M135A] = { @@ -3912,26 +3788,25 @@ struct saa7134_board saa7134_boards[] = { .tda829x_conf = { .lna_cfg = TDA8290_LNA_GP0_HIGH_OFF }, .gpiomask = 0x020200000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x00200000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio = 0x01, }, @@ -3946,26 +3821,25 @@ struct saa7134_board saa7134_boards[] = { .tda829x_conf = { .lna_cfg = TDA8290_LNA_OFF }, .gpiomask = 0x020200000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x00200000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio = 0x01, }, @@ -3981,21 +3855,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x00008000, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - .tv = 1, }}, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, }, }, @@ -4010,18 +3883,17 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x00008000, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - .tv = 1, }}, }, [SAA7134_BOARD_BEHOLD_403FM] = { @@ -4035,21 +3907,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x00008000, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - .tv = 1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4065,18 +3936,17 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - .tv = 1, }}, }, [SAA7134_BOARD_BEHOLD_405FM] = { @@ -4092,21 +3962,20 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - 
.tv = 1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4122,20 +3991,19 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0xc0c000, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, .gpio = 0xc0c000, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, .gpio = 0xc0c000, }}, }, @@ -4151,24 +4019,23 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0xc0c000, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, .gpio = 0xc0c000, },{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, .gpio = 0xc0c000, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0xc0c000, }, @@ -4185,16 +4052,15 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, @@ -4211,25 +4077,24 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4246,25 +4111,24 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4280,21 +4144,20 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4311,21 +4174,20 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = 
SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4342,21 +4204,20 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4372,24 +4233,23 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x000A8004, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, .gpio = 0x000A8004, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, .gpio = 0x000A8000, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x000A8000, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x000A8000, }, @@ -4404,21 +4264,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4432,21 +4291,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4460,21 +4318,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4488,21 +4345,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4517,21 +4373,20 @@ struct saa7134_board saa7134_boards[] = { .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4546,21 +4401,20 @@ struct saa7134_board 
saa7134_boards[] = { .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4575,21 +4429,20 @@ struct saa7134_board saa7134_boards[] = { .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4604,21 +4457,20 @@ struct saa7134_board saa7134_boards[] = { .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, },{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, },{ - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }}, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -4636,21 +4488,20 @@ struct saa7134_board saa7134_boards[] = { .empress_addr = 0x20, .tda9887_conf = TDA9887_PRESENT, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, .mpeg = SAA7134_MPEG_EMPRESS, @@ -4673,21 +4524,20 @@ struct saa7134_board saa7134_boards[] = { .empress_addr = 0x20, .tda9887_conf = TDA9887_PRESENT, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, .mpeg = SAA7134_MPEG_EMPRESS, @@ -4712,21 +4562,20 @@ struct saa7134_board saa7134_boards[] = { .empress_addr = 0x20, .tda9887_conf = TDA9887_PRESENT, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, .mpeg = SAA7134_MPEG_EMPRESS, @@ -4747,21 +4596,20 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, /* untested */ .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -4776,30 +4624,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0xf000, .inputs = {{ - .name = name_tv_mono, 
+ .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE2, .gpio = 0x0000, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x2000, - .tv = 1 }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x2000, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x1000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE2, .gpio = 0x6000, }, @@ -4813,11 +4659,11 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, @@ -4832,16 +4678,15 @@ struct saa7134_board saa7134_boards[] = { .tda829x_conf = { .lna_cfg = TDA8290_LNA_OFF }, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, @@ -4857,21 +4702,20 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200000, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -4885,21 +4729,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, }, }, @@ -4912,21 +4755,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, }, { - .name = name_comp, + .type = SAA7134_INPUT_COMPOSITE, .vmux = 0, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, }, }, @@ -4938,16 +4780,15 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, } }, @@ -4962,21 +4803,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE2, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, } }, @@ -4990,11 +4830,11 @@ struct 
saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = { { - .name = name_comp, + .type = SAA7134_INPUT_COMPOSITE, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE1, } }, @@ -5009,21 +4849,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 4, .amux = TV, - .tv = 1, }, { - .name = name_comp, + .type = SAA7134_INPUT_COMPOSITE, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, }, }, @@ -5038,21 +4877,20 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -5067,21 +4905,20 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 1 << 21, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp, + .type = SAA7134_INPUT_COMPOSITE, .vmux = 0, .amux = LINE2, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -5097,21 +4934,20 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 1 << 21, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp, + .type = SAA7134_INPUT_COMPOSITE, .vmux = 0, .amux = LINE2, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -5125,29 +4961,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x801a8087, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - .tv = 1, .gpio = 0x624000, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, .gpio = 0x624000, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 1, .amux = LINE1, .gpio = 0x624000, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x624001, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, }, }, @@ -5161,16 +4996,15 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .mpeg = SAA7134_MPEG_DVB, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp, + .type = SAA7134_INPUT_COMPOSITE, .vmux = 4, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, @@ -5186,25 +5020,24 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200000, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, }, { - .name = name_comp2, + 
.type = SAA7134_INPUT_COMPOSITE2, .vmux = 0, .amux = LINE2, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0200000, }, @@ -5218,30 +5051,29 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = 0x60, .gpiomask = 0x80000700, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, .gpio = 0x100, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x200, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x200, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .vmux = 1, .amux = LINE1, .gpio = 0x100, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .vmux = 8, .amux = 2, }, @@ -5257,18 +5089,17 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .ts_type = SAA7134_MPEG_TS_PARALLEL, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, #if 0 /* FIXME */ }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x200, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x200, @@ -5276,14 +5107,14 @@ struct saa7134_board saa7134_boards[] = { } }, #if 0 .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .vmux = 1, .amux = LINE1, .gpio = 0x100, }, #endif .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .vmux = 0, .amux = TV, }, @@ -5298,24 +5129,23 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 0x00300003, /* .gpiomask = 0x8c240003, */ .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x01, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE1, .gpio = 0x02, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x00300001, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, .gpio = 0x01, }, @@ -5331,29 +5161,28 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x03, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x00, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x00, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x00, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, .gpio = 0x01, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, .gpio = 0x00, }, @@ -5368,11 +5197,11 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = { { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, /* Not tested */ .amux = LINE1 } }, @@ -5387,21 +5216,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 2, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 9, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, }, }, @@ 
-5416,13 +5244,12 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .ts_type = SAA7134_MPEG_TS_PARALLEL, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, } }, .radio = { /* untested */ - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, }, }, @@ -5436,16 +5263,15 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE, .mpeg = SAA7134_MPEG_DVB, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 4, .amux = LINE2, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, } }, @@ -5459,10 +5285,10 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = { { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, } }, }, @@ -5479,25 +5305,24 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE2, }, }, @@ -5512,7 +5337,7 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x389c00, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x01fc00, @@ -5529,21 +5354,20 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .ts_type = SAA7134_MPEG_TS_PARALLEL, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 2, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 9, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, }, }, @@ -5556,21 +5380,20 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 2, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 9, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, }, }, @@ -5584,16 +5407,15 @@ struct saa7134_board saa7134_boards[] = { .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, } }, @@ -5607,25 +5429,24 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = 0x60, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, 
.vmux = 8, .amux = LINE2, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE1, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = TV, }, }, @@ -5642,29 +5463,28 @@ struct saa7134_board saa7134_boards[] = { .mpeg = SAA7134_MPEG_DVB, .ts_type = SAA7134_MPEG_TS_PARALLEL, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, .gpio = 0x00050000, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x00050000, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, .gpio = 0x00050000, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x00050000, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .vmux = 0, .amux = TV, .gpio = 0x00050000, @@ -5681,21 +5501,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x00008000, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, }, }, @@ -5710,21 +5529,20 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x00008000, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = LINE2, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 1, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, }, }, @@ -5736,15 +5554,15 @@ struct saa7134_board saa7134_boards[] = { .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE1, }, { - .name = name_comp3, + .type = SAA7134_INPUT_COMPOSITE3, .vmux = 2, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, @@ -5760,21 +5578,20 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 1 << 21, .ts_type = SAA7134_MPEG_TS_PARALLEL, .inputs = { { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp, + .type = SAA7134_INPUT_COMPOSITE, .vmux = 3, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0000000, }, @@ -5790,7 +5607,7 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x618E700, .inputs = {{ - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE1, .gpio = 0x6010000, @@ -5809,21 +5626,20 @@ struct saa7134_board saa7134_boards[] = { .gpiomask = 1 << 11, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = TV, - .tv = 1, }, { - .name = name_comp, + .type = SAA7134_INPUT_COMPOSITE, .vmux = 4, .amux = LINE1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE1, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = TV, .gpio = 0x0000800, }, @@ -5837,16 +5653,15 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_GO7007, .inputs = { { - 
.name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, }, { - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 3, .amux = TV, - .tv = 1, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 6, .amux = LINE1, } }, @@ -5862,25 +5677,24 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ - .name = name_tv, + .type = SAA7134_INPUT_TV, .vmux = 1, .amux = LINE2, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 0, .amux = LINE2, }, { - .name = name_comp2, + .type = SAA7134_INPUT_COMPOSITE2, .vmux = 3, .amux = LINE2, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, } }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, }, }, @@ -5893,29 +5707,28 @@ struct saa7134_board saa7134_boards[] = { .radio_addr = ADDR_UNSET, .gpiomask = 0x0d, .inputs = {{ - .name = name_tv_mono, + .type = SAA7134_INPUT_TV_MONO, .vmux = 1, .amux = LINE1, .gpio = 0x00, - .tv = 1, }, { - .name = name_comp1, + .type = SAA7134_INPUT_COMPOSITE1, .vmux = 3, .amux = LINE2, .gpio = 0x08, }, { - .name = name_svideo, + .type = SAA7134_INPUT_SVIDEO, .vmux = 8, .amux = LINE2, .gpio = 0x08, } }, .radio = { - .name = name_radio, + .type = SAA7134_INPUT_RADIO, .amux = LINE1, .gpio = 0x04, }, .mute = { - .name = name_mute, + .type = SAA7134_INPUT_MUTE, .amux = LINE1, .gpio = 0x08, }, diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c index e227b02cc122..42bc4172febd 100644 --- a/drivers/media/pci/saa7134/saa7134-core.c +++ b/drivers/media/pci/saa7134/saa7134-core.c @@ -112,7 +112,7 @@ int (*saa7134_dmasound_exit)(struct saa7134_dev *dev); printk(KERN_DEBUG pr_fmt("irq: " fmt), ## arg); \ } while (0) -void saa7134_track_gpio(struct saa7134_dev *dev, char *msg) +void saa7134_track_gpio(struct saa7134_dev *dev, const char *msg) { unsigned long mode,status; @@ -806,6 +806,153 @@ static void must_configure_manually(int has_eeprom) } } +static void saa7134_unregister_media_device(struct saa7134_dev *dev) +{ + +#ifdef CONFIG_MEDIA_CONTROLLER + if (!dev->media_dev) + return; + media_device_unregister(dev->media_dev); + media_device_cleanup(dev->media_dev); + kfree(dev->media_dev); + dev->media_dev = NULL; +#endif +} + +static void saa7134_media_release(struct saa7134_dev *dev) +{ +#ifdef CONFIG_MEDIA_CONTROLLER + int i; + + for (i = 0; i < SAA7134_INPUT_MAX + 1; i++) + media_device_unregister_entity(&dev->input_ent[i]); +#endif +} + +static void saa7134_create_entities(struct saa7134_dev *dev) +{ +#if defined(CONFIG_MEDIA_CONTROLLER) + int ret, i; + struct media_entity *entity; + struct media_entity *decoder = NULL; + + /* Check if it is using an external analog TV demod */ + media_device_for_each_entity(entity, dev->media_dev) { + if (entity->function == MEDIA_ENT_F_ATV_DECODER) + decoder = entity; + break; + } + + /* + * saa713x is not using an external ATV demod. 
+ * Register the internal one + */ + if (!decoder) { + dev->demod.name = "saa713x"; + dev->demod_pad[DEMOD_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK; + dev->demod_pad[DEMOD_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE; + dev->demod_pad[DEMOD_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE; + dev->demod.function = MEDIA_ENT_F_ATV_DECODER; + + ret = media_entity_pads_init(&dev->demod, DEMOD_NUM_PADS, + dev->demod_pad); + if (ret < 0) + pr_err("failed to initialize demod pad!\n"); + + ret = media_device_register_entity(dev->media_dev, &dev->demod); + if (ret < 0) + pr_err("failed to register demod entity!\n"); + + dev->decoder = &dev->demod; + } else { + dev->decoder = decoder; + } + + /* Initialize Video, VBI and Radio pads */ + dev->video_pad.flags = MEDIA_PAD_FL_SINK; + ret = media_entity_pads_init(&dev->video_dev->entity, 1, + &dev->video_pad); + if (ret < 0) + pr_err("failed to initialize video media entity!\n"); + + dev->vbi_pad.flags = MEDIA_PAD_FL_SINK; + ret = media_entity_pads_init(&dev->vbi_dev->entity, 1, + &dev->vbi_pad); + if (ret < 0) + pr_err("failed to initialize vbi media entity!\n"); + + /* Create entities for each input connector */ + for (i = 0; i < SAA7134_INPUT_MAX; i++) { + struct media_entity *ent = &dev->input_ent[i]; + struct saa7134_input *in = &card_in(dev, i); + + if (in->type == SAA7134_NO_INPUT) + break; + + /* This input uses the S-Video connector */ + if (in->type == SAA7134_INPUT_COMPOSITE_OVER_SVIDEO) + continue; + + ent->name = saa7134_input_name[in->type]; + ent->flags = MEDIA_ENT_FL_CONNECTOR; + dev->input_pad[i].flags = MEDIA_PAD_FL_SOURCE; + + switch (in->type) { + case SAA7134_INPUT_COMPOSITE: + case SAA7134_INPUT_COMPOSITE0: + case SAA7134_INPUT_COMPOSITE1: + case SAA7134_INPUT_COMPOSITE2: + case SAA7134_INPUT_COMPOSITE3: + case SAA7134_INPUT_COMPOSITE4: + ent->function = MEDIA_ENT_F_CONN_COMPOSITE; + break; + case SAA7134_INPUT_SVIDEO: + case SAA7134_INPUT_SVIDEO0: + case SAA7134_INPUT_SVIDEO1: + ent->function = MEDIA_ENT_F_CONN_SVIDEO; + break; + default: + /* + * SAA7134_INPUT_TV and SAA7134_INPUT_TV_MONO. + * + * Please notice that neither SAA7134_INPUT_MUTE or + * SAA7134_INPUT_RADIO are defined at + * saa7134_board.input. 
+ */ + ent->function = MEDIA_ENT_F_CONN_RF; + break; + } + + ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]); + if (ret < 0) + pr_err("failed to initialize input pad[%d]!\n", i); + + ret = media_device_register_entity(dev->media_dev, ent); + if (ret < 0) + pr_err("failed to register input entity %d!\n", i); + } + + /* Create input for Radio RF connector */ + if (card_has_radio(dev)) { + struct saa7134_input *in = &saa7134_boards[dev->board].radio; + struct media_entity *ent = &dev->input_ent[i]; + + ent->name = saa7134_input_name[in->type]; + ent->flags = MEDIA_ENT_FL_CONNECTOR; + dev->input_pad[i].flags = MEDIA_PAD_FL_SOURCE; + ent->function = MEDIA_ENT_F_CONN_RF; + + ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]); + if (ret < 0) + pr_err("failed to initialize input pad[%d]!\n", i); + + ret = media_device_register_entity(dev->media_dev, ent); + if (ret < 0) + pr_err("failed to register input entity %d!\n", i); + } +#endif +} + static struct video_device *vdev_init(struct saa7134_dev *dev, struct video_device *template, char *type) @@ -826,6 +973,8 @@ static struct video_device *vdev_init(struct saa7134_dev *dev, static void saa7134_unregister_video(struct saa7134_dev *dev) { + saa7134_media_release(dev); + if (dev->video_dev) { if (video_is_registered(dev->video_dev)) video_unregister_device(dev->video_dev); @@ -889,6 +1038,18 @@ static int saa7134_initdev(struct pci_dev *pci_dev, if (NULL == dev) return -ENOMEM; + dev->nr = saa7134_devcount; + sprintf(dev->name, "saa%x[%d]", pci_dev->device, dev->nr); + +#ifdef CONFIG_MEDIA_CONTROLLER + dev->media_dev = v4l2_mc_pci_media_device_init(pci_dev, dev->name); + if (!dev->media_dev) { + err = -ENOMEM; + goto fail0; + } + dev->v4l2_dev.mdev = dev->media_dev; +#endif + err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev); if (err) goto fail0; @@ -900,9 +1061,6 @@ static int saa7134_initdev(struct pci_dev *pci_dev, goto fail1; } - dev->nr = saa7134_devcount; - sprintf(dev->name,"saa%x[%d]",pci_dev->device,dev->nr); - /* pci quirks */ if (pci_pci_problems) { if (pci_pci_problems & PCIPCI_TRITON) @@ -1102,6 +1260,15 @@ static int saa7134_initdev(struct pci_dev *pci_dev, dev->name, video_device_node_name(dev->radio_dev)); } +#ifdef CONFIG_MEDIA_CONTROLLER + saa7134_create_entities(dev); + + err = v4l2_mc_create_media_graph(dev->media_dev); + if (err) { + pr_err("failed to create media graph\n"); + goto fail5; + } +#endif /* everything worked */ saa7134_devcount++; @@ -1109,6 +1276,18 @@ static int saa7134_initdev(struct pci_dev *pci_dev, saa7134_dmasound_init(dev); request_submodules(dev); + + /* + * Do it at the end, to reduce dynamic configuration changes during + * the device init. Yet, as request_modules() can be async, the + * topology will likely change after load the saa7134 subdrivers. 
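Once media_device_register() succeeds, the connector entities created above (composite, S-Video and RF inputs plus the internal or external decoder) become visible through the board's media controller node. A minimal userspace enumeration sketch, assuming the node shows up as /dev/media0; this is illustrative and not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/media.h>

int main(void)
{
	struct media_entity_desc ent;
	int fd = open("/dev/media0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/media0");
		return 1;
	}

	/* MEDIA_ENT_ID_FLAG_NEXT asks for the next entity after the given id. */
	memset(&ent, 0, sizeof(ent));
	ent.id = MEDIA_ENT_ID_FLAG_NEXT;
	while (ioctl(fd, MEDIA_IOC_ENUM_ENTITIES, &ent) == 0) {
		printf("entity %u: %-32s pads=%u links=%u\n",
		       ent.id, ent.name,
		       (unsigned int)ent.pads, (unsigned int)ent.links);
		ent.id |= MEDIA_ENT_ID_FLAG_NEXT;
	}

	close(fd);
	return 0;
}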
+ */ +#ifdef CONFIG_MEDIA_CONTROLLER + err = media_device_register(dev->media_dev); + if (err) + goto fail5; +#endif + return 0; fail5: @@ -1126,6 +1305,9 @@ static int saa7134_initdev(struct pci_dev *pci_dev, fail1: v4l2_device_unregister(&dev->v4l2_dev); fail0: +#ifdef CONFIG_MEDIA_CONTROLLER + kfree(dev->media_dev); +#endif kfree(dev); return err; } @@ -1188,9 +1370,10 @@ static void saa7134_finidev(struct pci_dev *pci_dev) release_mem_region(pci_resource_start(pci_dev,0), pci_resource_len(pci_dev,0)); - v4l2_device_unregister(&dev->v4l2_dev); + saa7134_unregister_media_device(dev); + /* free memory */ kfree(dev); } diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c index 101ba8729416..db987e5b93eb 100644 --- a/drivers/media/pci/saa7134/saa7134-dvb.c +++ b/drivers/media/pci/saa7134/saa7134-dvb.c @@ -1883,8 +1883,15 @@ static int dvb_init(struct saa7134_dev *dev) fe0->dvb.frontend->callback = saa7134_tuner_callback; /* register everything else */ +#ifndef CONFIG_MEDIA_CONTROLLER_DVB ret = vb2_dvb_register_bus(&dev->frontends, THIS_MODULE, dev, - &dev->pci->dev, adapter_nr, 0); + &dev->pci->dev, NULL, + adapter_nr, 0); +#else + ret = vb2_dvb_register_bus(&dev->frontends, THIS_MODULE, dev, + &dev->pci->dev, dev->media_dev, + adapter_nr, 0); +#endif /* this sequence is necessary to make the tda1004x load its firmware * and to enter analog mode of hybrid boards diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c index 56b932c97196..ca417a454d67 100644 --- a/drivers/media/pci/saa7134/saa7134-empress.c +++ b/drivers/media/pci/saa7134/saa7134-empress.c @@ -189,6 +189,7 @@ static const struct v4l2_ioctl_ops ts_ioctl_ops = { .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_g_frequency = saa7134_g_frequency, @@ -286,7 +287,7 @@ static int empress_init(struct saa7134_dev *dev) * transfers that do not start at the beginning of a page. A USERPTR * can start anywhere in a page, so USERPTR support is a no-go. 
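The VB2_DMABUF and vidioc_expbuf additions on the empress and video nodes let applications export the driver's MMAP buffers as dma-buf file descriptors instead of copying. A short userspace sketch of the new ioctl, assuming a /dev/video0 node backed by this driver; illustrative only.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_requestbuffers req;
	struct v4l2_exportbuffer exp;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/video0");
		return 1;
	}

	/* Allocate MMAP buffers first; EXPBUF exports one of them as a dma-buf fd. */
	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0) {
		perror("VIDIOC_REQBUFS");
		return 1;
	}

	memset(&exp, 0, sizeof(exp));
	exp.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	exp.index = 0;
	exp.flags = O_CLOEXEC;
	if (ioctl(fd, VIDIOC_EXPBUF, &exp) < 0) {
		perror("VIDIOC_EXPBUF");
		return 1;
	}

	/* exp.fd can now be handed to another device or API that accepts dma-buf. */
	printf("buffer 0 exported as dma-buf fd %d\n", exp.fd);
	close(exp.fd);
	close(fd);
	return 0;
}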
*/ - q->io_modes = VB2_MMAP | VB2_READ; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; q->drv_priv = &dev->ts_q; q->ops = &saa7134_empress_qops; q->gfp_flags = GFP_DMA32; diff --git a/drivers/media/pci/saa7134/saa7134-go7007.c b/drivers/media/pci/saa7134/saa7134-go7007.c index 8a2abb34186b..2799538e2d7e 100644 --- a/drivers/media/pci/saa7134/saa7134-go7007.c +++ b/drivers/media/pci/saa7134/saa7134-go7007.c @@ -378,7 +378,7 @@ static int saa7134_go7007_send_firmware(struct go7007 *go, u8 *data, int len) return 0; } -static struct go7007_hpi_ops saa7134_go7007_hpi_ops = { +static const struct go7007_hpi_ops saa7134_go7007_hpi_ops = { .interface_reset = saa7134_go7007_interface_reset, .write_interrupt = saa7134_go7007_write_interrupt, .read_interrupt = saa7134_go7007_read_interrupt, diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c index 21a579309575..38f94b742e28 100644 --- a/drivers/media/pci/saa7134/saa7134-tvaudio.c +++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c @@ -192,7 +192,7 @@ static void mute_input_7134(struct saa7134_dev *dev) in = dev->input; mute = (dev->ctl_mute || (dev->automute && (&card(dev).radio) != in)); - if (card(dev).mute.name) { + if (card(dev).mute.type) { /* * 7130 - we'll mute using some unconnected audio input * 7134 - we'll probably should switch external mux with gpio @@ -204,13 +204,14 @@ static void mute_input_7134(struct saa7134_dev *dev) if (dev->hw_mute == mute && dev->hw_input == in && !dev->insuspend) { audio_dbg(1, "mute/input: nothing to do [mute=%d,input=%s]\n", - mute, in->name); + mute, saa7134_input_name[in->type]); return; } audio_dbg(1, "ctl_mute=%d automute=%d input=%s => mute=%d input=%s\n", dev->ctl_mute, dev->automute, - dev->input->name, mute, in->name); + saa7134_input_name[dev->input->type], mute, + saa7134_input_name[in->type]); dev->hw_mute = mute; dev->hw_input = in; @@ -245,7 +246,7 @@ static void mute_input_7134(struct saa7134_dev *dev) mask = card(dev).gpiomask; saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, mask, mask); saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, mask, in->gpio); - saa7134_track_gpio(dev,in->name); + saa7134_track_gpio(dev, saa7134_input_name[in->type]); } static void tvaudio_setmode(struct saa7134_dev *dev, @@ -756,14 +757,14 @@ static int mute_input_7133(struct saa7134_dev *dev) if (0 != card(dev).gpiomask) { mask = card(dev).gpiomask; - if (card(dev).mute.name && dev->ctl_mute) + if (card(dev).mute.type && dev->ctl_mute) in = &card(dev).mute; else in = dev->input; saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, mask, mask); saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, mask, in->gpio); - saa7134_track_gpio(dev,in->name); + saa7134_track_gpio(dev, saa7134_input_name[in->type]); } return 0; diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c index a63c1366a64e..ffa39543eb65 100644 --- a/drivers/media/pci/saa7134/saa7134-video.c +++ b/drivers/media/pci/saa7134/saa7134-video.c @@ -409,7 +409,8 @@ static void set_tvnorm(struct saa7134_dev *dev, struct saa7134_tvnorm *norm) static void video_mux(struct saa7134_dev *dev, int input) { - video_dbg("video input = %d [%s]\n", input, card_in(dev, input).name); + video_dbg("video input = %d [%s]\n", + input, saa7134_input_name[card_in(dev, input).type]); dev->ctl_input = input; set_tvnorm(dev, dev->tvnorm); saa7134_tvaudio_setinput(dev, &card_in(dev, input)); @@ -478,8 +479,7 @@ void saa7134_set_tvnorm_hw(struct saa7134_dev *dev) { saa7134_set_decoder(dev); - if (card_in(dev, dev->ctl_input).tv) - 
saa_call_all(dev, video, s_std, dev->tvnorm->id); + saa_call_all(dev, video, s_std, dev->tvnorm->id); /* Set the correct norm for the saa6752hs. This function does nothing if there is no saa6752hs. */ saa_call_empress(dev, video, s_std, dev->tvnorm->id); @@ -785,6 +785,63 @@ static int stop_preview(struct saa7134_dev *dev) return 0; } +/* + * Media Controller helper functions + */ + +static int saa7134_enable_analog_tuner(struct saa7134_dev *dev) +{ +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_device *mdev = dev->media_dev; + struct media_entity *source; + struct media_link *link, *found_link = NULL; + int ret, active_links = 0; + + if (!mdev || !dev->decoder) + return 0; + + /* + * This will find the tuner that is connected into the decoder. + * Technically, this is not 100% correct, as the device may be + * using an analog input instead of the tuner. However, as we can't + * do DVB streaming while the DMA engine is being used for V4L2, + * this should be enough for the actual needs. + */ + list_for_each_entry(link, &dev->decoder->links, list) { + if (link->sink->entity == dev->decoder) { + found_link = link; + if (link->flags & MEDIA_LNK_FL_ENABLED) + active_links++; + break; + } + } + + if (active_links == 1 || !found_link) + return 0; + + source = found_link->source->entity; + list_for_each_entry(link, &source->links, list) { + struct media_entity *sink; + int flags = 0; + + sink = link->sink->entity; + + if (sink == dev->decoder) + flags = MEDIA_LNK_FL_ENABLED; + + ret = media_entity_setup_link(link, flags); + if (ret) { + pr_err("Couldn't change link %s->%s to %s. Error %d\n", + source->name, sink->name, + flags ? "enabled" : "disabled", + ret); + return ret; + } + } +#endif + return 0; +} + /* ------------------------------------------------------------------ */ static int buffer_activate(struct saa7134_dev *dev, @@ -924,6 +981,9 @@ static int queue_setup(struct vb2_queue *q, *nplanes = 1; sizes[0] = size; alloc_ctxs[0] = dev->alloc_ctx; + + saa7134_enable_analog_tuner(dev); + return 0; } @@ -1219,10 +1279,13 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv, f->fmt.pix.height = dev->height; f->fmt.pix.field = dev->field; f->fmt.pix.pixelformat = dev->fmt->fourcc; - f->fmt.pix.bytesperline = - (f->fmt.pix.width * dev->fmt->depth) >> 3; + if (dev->fmt->planar) + f->fmt.pix.bytesperline = f->fmt.pix.width; + else + f->fmt.pix.bytesperline = + (f->fmt.pix.width * dev->fmt->depth) / 8; f->fmt.pix.sizeimage = - f->fmt.pix.height * f->fmt.pix.bytesperline; + (f->fmt.pix.height * f->fmt.pix.width * dev->fmt->depth) / 8; f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; return 0; } @@ -1298,10 +1361,13 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv, if (f->fmt.pix.height > maxh) f->fmt.pix.height = maxh; f->fmt.pix.width &= ~0x03; - f->fmt.pix.bytesperline = - (f->fmt.pix.width * fmt->depth) >> 3; + if (fmt->planar) + f->fmt.pix.bytesperline = f->fmt.pix.width; + else + f->fmt.pix.bytesperline = + (f->fmt.pix.width * fmt->depth) / 8; f->fmt.pix.sizeimage = - f->fmt.pix.height * f->fmt.pix.bytesperline; + (f->fmt.pix.height * f->fmt.pix.width * fmt->depth) / 8; f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; return 0; @@ -1381,13 +1447,19 @@ int saa7134_enum_input(struct file *file, void *priv, struct v4l2_input *i) n = i->index; if (n >= SAA7134_INPUT_MAX) return -EINVAL; - if (NULL == card_in(dev, i->index).name) + if (card_in(dev, i->index).type == SAA7134_NO_INPUT) return -EINVAL; i->index = n; - i->type = V4L2_INPUT_TYPE_CAMERA; - 
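The planar-format change in saa7134_g/try_fmt_vid_cap above reports the luma stride as bytesperline while sizeimage covers all planes. A minimal standalone sketch of the same arithmetic, using hypothetical 720x576 frames and depth values (not taken from the patch):

	#include <stdio.h>

	/* Mirrors the g/try_fmt computation above: planar formats report the
	 * luma stride (width) as bytesperline, while sizeimage always covers
	 * every plane, i.e. width * height * depth / 8.
	 */
	static void fmt_size(unsigned int width, unsigned int height,
			     unsigned int depth_bits, int planar)
	{
		unsigned int bytesperline = planar ? width : width * depth_bits / 8;
		unsigned int sizeimage = width * height * depth_bits / 8;

		printf("%ux%u depth=%u %s: bytesperline=%u sizeimage=%u\n",
		       width, height, depth_bits, planar ? "planar" : "packed",
		       bytesperline, sizeimage);
	}

	int main(void)
	{
		fmt_size(720, 576, 12, 1);	/* e.g. a planar YUV 4:2:0 format */
		fmt_size(720, 576, 16, 0);	/* e.g. packed YUYV */
		return 0;
	}

For the planar case this prints bytesperline=720 and sizeimage=622080; the packed case prints 1440 and 829440.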
strcpy(i->name, card_in(dev, n).name); - if (card_in(dev, n).tv) + strcpy(i->name, saa7134_input_name[card_in(dev, n).type]); + switch (card_in(dev, n).type) { + case SAA7134_INPUT_TV: + case SAA7134_INPUT_TV_MONO: i->type = V4L2_INPUT_TYPE_TUNER; + break; + default: + i->type = V4L2_INPUT_TYPE_CAMERA; + break; + } if (n == dev->ctl_input) { int v1 = saa_readb(SAA7134_STATUS_VIDEO1); int v2 = saa_readb(SAA7134_STATUS_VIDEO2); @@ -1419,7 +1491,7 @@ int saa7134_s_input(struct file *file, void *priv, unsigned int i) if (i >= SAA7134_INPUT_MAX) return -EINVAL; - if (NULL == card_in(dev, i).name) + if (card_in(dev, i).type == SAA7134_NO_INPUT) return -EINVAL; video_mux(dev, i); return 0; @@ -1656,12 +1728,13 @@ int saa7134_g_tuner(struct file *file, void *priv, return -EINVAL; memset(t, 0, sizeof(*t)); for (n = 0; n < SAA7134_INPUT_MAX; n++) { - if (card_in(dev, n).tv) + if (card_in(dev, n).type == SAA7134_INPUT_TV || + card_in(dev, n).type == SAA7134_INPUT_TV_MONO) break; } if (n == SAA7134_INPUT_MAX) return -EINVAL; - if (NULL != card_in(dev, n).name) { + if (card_in(dev, n).type != SAA7134_NO_INPUT) { strcpy(t->name, "Television"); t->type = V4L2_TUNER_ANALOG_TV; saa_call_all(dev, tuner, g_tuner, t); @@ -1906,6 +1979,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_s_std = saa7134_s_std, .vidioc_g_std = saa7134_g_std, .vidioc_querystd = saa7134_querystd, @@ -2089,7 +2163,7 @@ int saa7134_video_init1(struct saa7134_dev *dev) * USERPTR support is a no-go unless the application knows about these * limitations and has special support for this. */ - q->io_modes = VB2_MMAP | VB2_READ; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; if (saa7134_userptr) q->io_modes |= VB2_USERPTR; q->drv_priv = &dev->video_q; diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h index 5938bc781999..8936568fab94 100644 --- a/drivers/media/pci/saa7134/saa7134.h +++ b/drivers/media/pci/saa7134/saa7134.h @@ -361,12 +361,29 @@ struct saa7134_card_ir { #define SET_CLOCK_INVERTED (1 << 2) #define SET_VSYNC_OFF (1 << 3) +enum saa7134_input_types { + SAA7134_NO_INPUT = 0, + SAA7134_INPUT_MUTE, + SAA7134_INPUT_RADIO, + SAA7134_INPUT_TV, + SAA7134_INPUT_TV_MONO, + SAA7134_INPUT_COMPOSITE, + SAA7134_INPUT_COMPOSITE0, + SAA7134_INPUT_COMPOSITE1, + SAA7134_INPUT_COMPOSITE2, + SAA7134_INPUT_COMPOSITE3, + SAA7134_INPUT_COMPOSITE4, + SAA7134_INPUT_SVIDEO, + SAA7134_INPUT_SVIDEO0, + SAA7134_INPUT_SVIDEO1, + SAA7134_INPUT_COMPOSITE_OVER_SVIDEO, +}; + struct saa7134_input { - char *name; - unsigned int vmux; - enum saa7134_audio_in amux; - unsigned int gpio; - unsigned int tv:1; + enum saa7134_input_types type; + unsigned int vmux; + enum saa7134_audio_in amux; + unsigned int gpio; }; enum saa7134_mpeg_type { @@ -410,7 +427,7 @@ struct saa7134_board { unsigned int ts_force_val:1; }; -#define card_has_radio(dev) (NULL != saa7134_boards[dev->board].radio.name) +#define card_has_radio(dev) (SAA7134_NO_INPUT != saa7134_boards[dev->board].radio.type) #define card_is_empress(dev) (SAA7134_MPEG_EMPRESS == saa7134_boards[dev->board].mpeg) #define card_is_dvb(dev) (SAA7134_MPEG_DVB == saa7134_boards[dev->board].mpeg) #define card_is_go7007(dev) (SAA7134_MPEG_GO7007 == saa7134_boards[dev->board].mpeg) @@ -654,6 +671,19 @@ struct saa7134_dev { /* I2C keyboard data */ struct IR_i2c_init_data init_data; +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_device 
*media_dev; + + struct media_entity input_ent[SAA7134_INPUT_MAX + 1]; + struct media_pad input_pad[SAA7134_INPUT_MAX + 1]; + + struct media_entity demod; + struct media_pad demod_pad[DEMOD_NUM_PADS]; + + struct media_pad video_pad, vbi_pad; + struct media_entity *decoder; +#endif + #if IS_ENABLED(CONFIG_VIDEO_SAA7134_DVB) /* SAA7134_MPEG_DVB only */ struct vb2_dvb_frontends frontends; @@ -727,7 +757,7 @@ extern struct mutex saa7134_devlist_lock; extern int saa7134_no_overlay; extern bool saa7134_userptr; -void saa7134_track_gpio(struct saa7134_dev *dev, char *msg); +void saa7134_track_gpio(struct saa7134_dev *dev, const char *msg); void saa7134_set_gpio(struct saa7134_dev *dev, int bit_no, int value); #define SAA7134_PGTABLE_SIZE 4096 @@ -760,6 +790,7 @@ extern int (*saa7134_dmasound_exit)(struct saa7134_dev *dev); /* saa7134-cards.c */ extern struct saa7134_board saa7134_boards[]; +extern const char * const saa7134_input_name[]; extern const unsigned int saa7134_bcount; extern struct pci_device_id saa7134_pci_tbl[]; diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c index a69dc6a0752b..18d229fa65cf 100644 --- a/drivers/media/pci/ttpci/av7110.c +++ b/drivers/media/pci/ttpci/av7110.c @@ -1739,7 +1739,7 @@ static int alps_tdlb7_request_firmware(struct dvb_frontend* fe, const struct fir #endif } -static struct sp8870_config alps_tdlb7_config = { +static const struct sp8870_config alps_tdlb7_config = { .demod_address = 0x71, .request_firmware = alps_tdlb7_request_firmware, diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c index de54310a2660..9f48100227f1 100644 --- a/drivers/media/pci/ttpci/budget.c +++ b/drivers/media/pci/ttpci/budget.c @@ -644,7 +644,7 @@ static void frontend_init(struct budget *budget) } case 0x101c: { /* TT S2-1600 */ - struct stv6110x_devctl *ctl; + const struct stv6110x_devctl *ctl; saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO); msleep(50); saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI); @@ -697,7 +697,7 @@ static void frontend_init(struct budget *budget) break; case 0x1020: { /* Omicom S2 */ - struct stv6110x_devctl *ctl; + const struct stv6110x_devctl *ctl; saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO); msleep(50); saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI); diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 8b89ebe16d94..201f5c296a95 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -54,15 +54,6 @@ config VIDEO_VIU Say Y here if you want to enable VIU device on MPC5121e Rev2+. In doubt, say N. -config VIDEO_TIMBERDALE - tristate "Support for timberdale Video In/LogiWIN" - depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && HAS_DMA - depends on (MFD_TIMBERDALE && TIMB_DMA) || COMPILE_TEST - select VIDEO_ADV7180 - select VIDEOBUF_DMA_CONTIG - ---help--- - Add support for the Video In peripherial of the timberdale FPGA. - config VIDEO_M32R_AR tristate "AR devices" depends on VIDEO_V4L2 @@ -120,6 +111,19 @@ source "drivers/media/platform/s5p-tv/Kconfig" source "drivers/media/platform/am437x/Kconfig" source "drivers/media/platform/xilinx/Kconfig" +config VIDEO_TI_CAL + tristate "TI CAL (Camera Adaptation Layer) driver" + depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + depends on SOC_DRA7XX || COMPILE_TEST + depends on HAS_DMA + select VIDEOBUF2_DMA_CONTIG + default n + ---help--- + Support for the TI CAL (Camera Adaptation Layer) block + found on DRA72X SoC. 
+ In TI Technical Reference Manual this module is referred as + Camera Interface Subsystem (CAMSS). + endif # V4L_PLATFORM_DRIVERS menuconfig V4L_MEM2MEM_DRIVERS diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile index efa0295af87b..bbb7bd1eb268 100644 --- a/drivers/media/platform/Makefile +++ b/drivers/media/platform/Makefile @@ -2,7 +2,6 @@ # Makefile for the video capture/playback device drivers. # -obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o @@ -18,6 +17,8 @@ obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/ +obj-$(CONFIG_VIDEO_TI_CAL) += ti-vpe/ + obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o obj-$(CONFIG_VIDEO_CODA) += coda/ diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c index 7d28899f89ce..6efe9d002961 100644 --- a/drivers/media/platform/coda/coda-bit.c +++ b/drivers/media/platform/coda/coda-bit.c @@ -1342,7 +1342,7 @@ static void coda_finish_encode(struct coda_ctx *ctx) /* Calculate bytesused field */ if (dst_buf->sequence == 0) { - vb2_set_plane_payload(&dst_buf->vb2_buf, 0, + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr + ctx->vpu_header_size[0] + ctx->vpu_header_size[1] + ctx->vpu_header_size[2]); diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c index ffbefdff6b5e..6fba32bec974 100644 --- a/drivers/media/platform/davinci/dm644x_ccdc.c +++ b/drivers/media/platform/davinci/dm644x_ccdc.c @@ -261,7 +261,7 @@ static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params) */ if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) { if (fpc_physaddr != NULL) { - free_pages((unsigned long)fpc_physaddr, + free_pages((unsigned long)fpc_virtaddr, get_order (config_params->fault_pxl.fp_num * FP_NUM_BYTES)); diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c index 93782f15b825..a600e32e2543 100644 --- a/drivers/media/platform/exynos-gsc/gsc-m2m.c +++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c @@ -700,7 +700,7 @@ static unsigned int gsc_m2m_poll(struct file *file, { struct gsc_ctx *ctx = fh_to_ctx(file->private_data); struct gsc_dev *gsc = ctx->gsc_dev; - int ret; + unsigned int ret; if (mutex_lock_interruptible(&gsc->lock)) return -ERESTARTSYS; diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index e79ddbb1e14f..feb521f28e14 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -389,13 +389,19 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd, struct fimc_source_info *pd = &fmd->sensor[index].pdata; struct device_node *rem, *ep, *np; struct v4l2_of_endpoint endpoint; + int ret; /* Assume here a port node can have only one endpoint node. 
*/ ep = of_get_next_child(port, NULL); if (!ep) return 0; - v4l2_of_parse_endpoint(ep, &endpoint); + ret = v4l2_of_parse_endpoint(ep, &endpoint); + if (ret) { + of_node_put(ep); + return ret; + } + if (WARN_ON(endpoint.base.port == 0) || index >= FIMC_MAX_SENSORS) return -EINVAL; @@ -486,8 +492,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd) continue; ret = fimc_md_parse_port_node(fmd, port, index); - if (ret < 0) + if (ret < 0) { + of_node_put(node); goto rpm_put; + } index++; } @@ -498,8 +506,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd) for_each_child_of_node(ports, node) { ret = fimc_md_parse_port_node(fmd, node, index); - if (ret < 0) + if (ret < 0) { + of_node_put(node); break; + } index++; } rpm_put: @@ -707,8 +717,10 @@ static int fimc_md_register_platform_entities(struct fimc_md *fmd, ret = fimc_md_register_platform_entity(fmd, pdev, plat_entity); put_device(&pdev->dev); - if (ret < 0) + if (ret < 0) { + of_node_put(node); break; + } } return ret; diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c index ac5e50e595be..bd5c46c3d4b7 100644 --- a/drivers/media/platform/exynos4-is/mipi-csis.c +++ b/drivers/media/platform/exynos4-is/mipi-csis.c @@ -736,6 +736,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev, { struct device_node *node = pdev->dev.of_node; struct v4l2_of_endpoint endpoint; + int ret; if (of_property_read_u32(node, "clock-frequency", &state->clk_frequency)) @@ -751,7 +752,9 @@ static int s5pcsis_parse_dt(struct platform_device *pdev, return -EINVAL; } /* Get port node and validate MIPI-CSI channel id. */ - v4l2_of_parse_endpoint(node, &endpoint); + ret = v4l2_of_parse_endpoint(node, &endpoint); + if (ret) + goto err; state->index = endpoint.base.port - FIMC_INPUT_MIPI_CSI2_0; if (state->index >= CSIS_MAX_ENTITIES) @@ -764,9 +767,10 @@ static int s5pcsis_parse_dt(struct platform_device *pdev, "samsung,csis-wclk"); state->num_lanes = endpoint.bus.mipi_csi2.num_data_lanes; - of_node_put(node); - return 0; +err: + of_node_put(node); + return ret; } static int s5pcsis_pm_resume(struct device *dev, bool runtime); diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 0bcfa553c1aa..f9e5245f26ac 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -449,7 +449,7 @@ void omap3isp_configure_bridge(struct isp_device *isp, case CCDC_INPUT_PARALLEL: ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL; ispctrl_val |= parcfg->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT; - shift += parcfg->data_lane_shift * 2; + shift += parcfg->data_lane_shift; break; case CCDC_INPUT_CSI2A: @@ -2235,8 +2235,11 @@ static int isp_of_parse_node(struct device *dev, struct device_node *node, struct isp_bus_cfg *buscfg = &isd->bus; struct v4l2_of_endpoint vep; unsigned int i; + int ret; - v4l2_of_parse_endpoint(node, &vep); + ret = v4l2_of_parse_endpoint(node, &vep); + if (ret) + return ret; dev_dbg(dev, "parsing endpoint %s, interface %u\n", node->full_name, vep.base.port); @@ -2528,12 +2531,13 @@ static int isp_probe(struct platform_device *pdev) } /* Interrupt */ - isp->irq_num = platform_get_irq(pdev, 0); - if (isp->irq_num <= 0) { + ret = platform_get_irq(pdev, 0); + if (ret <= 0) { dev_err(isp->dev, "No IRQ resource\n"); ret = -ENODEV; goto error_iommu; } + isp->irq_num = ret; if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED, "OMAP3 ISP", isp)) { @@ -2599,6 +2603,7 @@ static const struct 
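The fimc-md and omap3isp hunks above share one rule: v4l2_of_parse_endpoint() errors are now propagated, and any early exit from a for_each_child_of_node() loop must drop the child reference the iterator still holds. A minimal sketch of that reference-counting pattern (parse_one() is a hypothetical callback, not part of the patch):

	#include <linux/of.h>

	/* Illustration only: walk all children, stop on the first error, and
	 * balance the reference that for_each_child_of_node() holds on 'child'.
	 */
	static int parse_all_children(struct device_node *parent,
				      int (*parse_one)(struct device_node *child))
	{
		struct device_node *child;
		int ret = 0;

		for_each_child_of_node(parent, child) {
			ret = parse_one(child);
			if (ret < 0) {
				of_node_put(child);	/* drop the held reference */
				break;
			}
		}
		return ret;
	}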
of_device_id omap3isp_of_table[] = { { .compatible = "ti,omap3-isp" }, { }, }; +MODULE_DEVICE_TABLE(of, omap3isp_of_table); static struct platform_driver omap3isp_driver = { .probe = isp_probe, diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c index bb3974c98e37..882310eb45cc 100644 --- a/drivers/media/platform/omap3isp/ispccdc.c +++ b/drivers/media/platform/omap3isp/ispccdc.c @@ -2421,7 +2421,7 @@ static int ccdc_link_validate(struct v4l2_subdev *sd, &((struct isp_bus_cfg *) media_entity_to_v4l2_subdev(link->source->entity) ->host_priv)->bus.parallel; - parallel_shift = parcfg->data_lane_shift * 2; + parallel_shift = parcfg->data_lane_shift; } else { parallel_shift = 0; } diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c index 84a96670e2e7..ac30a0f83780 100644 --- a/drivers/media/platform/omap3isp/isppreview.c +++ b/drivers/media/platform/omap3isp/isppreview.c @@ -1480,13 +1480,6 @@ static void preview_isr_buffer(struct isp_prev_device *prev) struct isp_buffer *buffer; int restart = 0; - if (prev->input == PREVIEW_INPUT_MEMORY) { - buffer = omap3isp_video_buffer_next(&prev->video_in); - if (buffer != NULL) - preview_set_inaddr(prev, buffer->dma); - pipe->state |= ISP_PIPELINE_IDLE_INPUT; - } - if (prev->output & PREVIEW_OUTPUT_MEMORY) { buffer = omap3isp_video_buffer_next(&prev->video_out); if (buffer != NULL) { @@ -1496,6 +1489,13 @@ static void preview_isr_buffer(struct isp_prev_device *prev) pipe->state |= ISP_PIPELINE_IDLE_OUTPUT; } + if (prev->input == PREVIEW_INPUT_MEMORY) { + buffer = omap3isp_video_buffer_next(&prev->video_in); + if (buffer != NULL) + preview_set_inaddr(prev, buffer->dma); + pipe->state |= ISP_PIPELINE_IDLE_INPUT; + } + switch (prev->state) { case ISP_PIPELINE_STREAM_SINGLESHOT: if (isp_pipeline_ready(pipe)) diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index 994dfc0813f6..2aff755ff77c 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c @@ -434,10 +434,68 @@ static void isp_video_buffer_queue(struct vb2_buffer *buf) } } +/* + * omap3isp_video_return_buffers - Return all queued buffers to videobuf2 + * @video: ISP video object + * @state: new state for the returned buffers + * + * Return all buffers queued on the video node to videobuf2 in the given state. + * The buffer state should be VB2_BUF_STATE_QUEUED if called due to an error + * when starting the stream, or VB2_BUF_STATE_ERROR otherwise. + * + * The function must be called with the video irqlock held. + */ +static void omap3isp_video_return_buffers(struct isp_video *video, + enum vb2_buffer_state state) +{ + while (!list_empty(&video->dmaqueue)) { + struct isp_buffer *buf; + + buf = list_first_entry(&video->dmaqueue, + struct isp_buffer, irqlist); + list_del(&buf->irqlist); + vb2_buffer_done(&buf->vb.vb2_buf, state); + } +} + +static int isp_video_start_streaming(struct vb2_queue *queue, + unsigned int count) +{ + struct isp_video_fh *vfh = vb2_get_drv_priv(queue); + struct isp_video *video = vfh->video; + struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); + unsigned long flags; + int ret; + + /* In sensor-to-memory mode, the stream can be started synchronously + * to the stream on command. In memory-to-memory mode, it will be + * started when buffers are queued on both the input and output. 
+ */ + if (pipe->input) + return 0; + + ret = omap3isp_pipeline_set_stream(pipe, + ISP_PIPELINE_STREAM_CONTINUOUS); + if (ret < 0) { + spin_lock_irqsave(&video->irqlock, flags); + omap3isp_video_return_buffers(video, VB2_BUF_STATE_QUEUED); + spin_unlock_irqrestore(&video->irqlock, flags); + return ret; + } + + spin_lock_irqsave(&video->irqlock, flags); + if (list_empty(&video->dmaqueue)) + video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN; + spin_unlock_irqrestore(&video->irqlock, flags); + + return 0; +} + static const struct vb2_ops isp_video_queue_ops = { .queue_setup = isp_video_queue_setup, .buf_prepare = isp_video_buffer_prepare, .buf_queue = isp_video_buffer_queue, + .start_streaming = isp_video_start_streaming, }; /* @@ -459,7 +517,7 @@ static const struct vb2_ops isp_video_queue_ops = { struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video) { struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); - enum isp_pipeline_state state; + enum vb2_buffer_state vb_state; struct isp_buffer *buf; unsigned long flags; @@ -495,17 +553,19 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video) /* Report pipeline errors to userspace on the capture device side. */ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) { - state = VB2_BUF_STATE_ERROR; + vb_state = VB2_BUF_STATE_ERROR; pipe->error = false; } else { - state = VB2_BUF_STATE_DONE; + vb_state = VB2_BUF_STATE_DONE; } - vb2_buffer_done(&buf->vb.vb2_buf, state); + vb2_buffer_done(&buf->vb.vb2_buf, vb_state); spin_lock_irqsave(&video->irqlock, flags); if (list_empty(&video->dmaqueue)) { + enum isp_pipeline_state state; + spin_unlock_irqrestore(&video->irqlock, flags); if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) @@ -541,26 +601,16 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video) * omap3isp_video_cancel_stream - Cancel stream on a video node * @video: ISP video object * - * Cancelling a stream mark all buffers on the video node as erroneous and makes - * sure no new buffer can be queued. + * Cancelling a stream returns all buffers queued on the video node to videobuf2 + * in the erroneous state and makes sure no new buffer can be queued. */ void omap3isp_video_cancel_stream(struct isp_video *video) { unsigned long flags; spin_lock_irqsave(&video->irqlock, flags); - - while (!list_empty(&video->dmaqueue)) { - struct isp_buffer *buf; - - buf = list_first_entry(&video->dmaqueue, - struct isp_buffer, irqlist); - list_del(&buf->irqlist); - vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); - } - + omap3isp_video_return_buffers(video, VB2_BUF_STATE_ERROR); video->error = true; - spin_unlock_irqrestore(&video->irqlock, flags); } @@ -1087,29 +1137,10 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) if (ret < 0) goto err_check_format; - /* In sensor-to-memory mode, the stream can be started synchronously - * to the stream on command. In memory-to-memory mode, it will be - * started when buffers are queued on both the input and output. 
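The new isp_video_start_streaming() above follows the standard videobuf2 contract: when .start_streaming fails, every buffer already queued to the driver must be handed back with VB2_BUF_STATE_QUEUED, which is exactly what omap3isp_video_return_buffers() does in the error path. A generic sketch of that contract (struct my_dev, struct my_buffer and my_hw_start() are placeholders, not part of the patch):

	static int my_start_streaming(struct vb2_queue *q, unsigned int count)
	{
		struct my_dev *dev = vb2_get_drv_priv(q);
		struct my_buffer *buf, *tmp;
		int ret;

		ret = my_hw_start(dev);
		if (ret) {
			/* On failure, give all queued buffers back as QUEUED,
			 * never as ERROR or DONE.
			 */
			list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
				list_del(&buf->list);
				vb2_buffer_done(&buf->vb.vb2_buf,
						VB2_BUF_STATE_QUEUED);
			}
		}
		return ret;
	}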
- */ - if (pipe->input == NULL) { - ret = omap3isp_pipeline_set_stream(pipe, - ISP_PIPELINE_STREAM_CONTINUOUS); - if (ret < 0) - goto err_set_stream; - spin_lock_irqsave(&video->irqlock, flags); - if (list_empty(&video->dmaqueue)) - video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN; - spin_unlock_irqrestore(&video->irqlock, flags); - } - mutex_unlock(&video->stream_lock); return 0; -err_set_stream: - mutex_lock(&video->queue_lock); - vb2_streamoff(&vfh->queue, type); - mutex_unlock(&video->queue_lock); err_check_format: media_entity_pipeline_stop(&video->video.entity); err_pipeline_start: diff --git a/drivers/media/platform/omap3isp/omap3isp.h b/drivers/media/platform/omap3isp/omap3isp.h index 190e259a6a2d..443e8f7673e2 100644 --- a/drivers/media/platform/omap3isp/omap3isp.h +++ b/drivers/media/platform/omap3isp/omap3isp.h @@ -33,9 +33,9 @@ enum isp_interface_type { * struct isp_parallel_cfg - Parallel interface configuration * @data_lane_shift: Data lane shifter * 0 - CAMEXT[13:0] -> CAM[13:0] - * 1 - CAMEXT[13:2] -> CAM[11:0] - * 2 - CAMEXT[13:4] -> CAM[9:0] - * 3 - CAMEXT[13:6] -> CAM[7:0] + * 2 - CAMEXT[13:2] -> CAM[11:0] + * 4 - CAMEXT[13:4] -> CAM[9:0] + * 6 - CAMEXT[13:6] -> CAM[7:0] * @clk_pol: Pixel clock polarity * 0 - Sample on rising edge, 1 - Sample on falling edge * @hs_pol: Horizontal synchronization polarity @@ -48,7 +48,7 @@ enum isp_interface_type { * 0 - Normal, 1 - One's complement */ struct isp_parallel_cfg { - unsigned int data_lane_shift:2; + unsigned int data_lane_shift:3; unsigned int clk_pol:1; unsigned int hs_pol:1; unsigned int vs_pol:1; diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c index 485f5259acb0..552789a69c86 100644 --- a/drivers/media/platform/rcar_jpu.c +++ b/drivers/media/platform/rcar_jpu.c @@ -1613,6 +1613,7 @@ static const struct of_device_id jpu_dt_ids[] = { { .compatible = "renesas,jpu-r8a7791" }, /* M2-W */ { .compatible = "renesas,jpu-r8a7792" }, /* V2H */ { .compatible = "renesas,jpu-r8a7793" }, /* M2-N */ + { .compatible = "renesas,rcar-gen2-jpu" }, { }, }; MODULE_DEVICE_TABLE(of, jpu_dt_ids); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 0434f02a7175..034b5c1d35a1 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c @@ -212,6 +212,14 @@ static struct mfc_control controls[] = { .menu_skip_mask = 0, }, { + .id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME, + .type = V4L2_CTRL_TYPE_BUTTON, + .minimum = 0, + .maximum = 0, + .step = 0, + .default_value = 0, + }, + { .id = V4L2_CID_MPEG_VIDEO_VBV_SIZE, .type = V4L2_CTRL_TYPE_INTEGER, .minimum = 0, @@ -1423,6 +1431,10 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE: ctx->force_frame_type = ctrl->val; break; + case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: + ctx->force_frame_type = + V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME; + break; case V4L2_CID_MPEG_VIDEO_VBV_SIZE: p->vbv_size = ctrl->val; break; diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c index b7fd695b9ed5..dc75a80794fb 100644 --- a/drivers/media/platform/soc_camera/rcar_vin.c +++ b/drivers/media/platform/soc_camera/rcar_vin.c @@ -143,6 +143,7 @@ #define RCAR_VIN_BT656 (1 << 3) enum chip_id { + RCAR_GEN3, RCAR_GEN2, RCAR_H1, RCAR_M1, @@ -1818,6 +1819,7 @@ static struct soc_camera_host_ops rcar_vin_host_ops = { #ifdef CONFIG_OF static const struct of_device_id rcar_vin_of_table[] 
= { + { .compatible = "renesas,vin-r8a7795", .data = (void *)RCAR_GEN3 }, { .compatible = "renesas,vin-r8a7794", .data = (void *)RCAR_GEN2 }, { .compatible = "renesas,vin-r8a7793", .data = (void *)RCAR_GEN2 }, { .compatible = "renesas,vin-r8a7791", .data = (void *)RCAR_GEN2 }, diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c index 90c87f2b4ec0..b9f369c0fb94 100644 --- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c +++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c @@ -213,8 +213,7 @@ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq, unsigned int *count, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { - struct soc_camera_device *icd = container_of(vq, - struct soc_camera_device, vb2_vidq); + struct soc_camera_device *icd = soc_camera_from_vb2q(vq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; @@ -361,8 +360,7 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb) static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct soc_camera_device *icd = container_of(vb->vb2_queue, - struct soc_camera_device, vb2_vidq); + struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf); @@ -413,8 +411,7 @@ error: static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct soc_camera_device *icd = container_of(vb->vb2_queue, - struct soc_camera_device, vb2_vidq); + struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf); struct sh_mobile_ceu_dev *pcdev = ici->priv; @@ -444,8 +441,7 @@ static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb) static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct soc_camera_device *icd = container_of(vb->vb2_queue, - struct soc_camera_device, vb2_vidq); + struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; @@ -460,7 +456,7 @@ static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb) static void sh_mobile_ceu_stop_streaming(struct vb2_queue *q) { - struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq); + struct soc_camera_device *icd = soc_camera_from_vb2q(q); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct list_head *buf_head, *tmp; diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c index 69d7fe4471c2..2c0015b1264d 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c @@ -118,7 +118,7 @@ int c8sectpfe_frontend_attach(struct dvb_frontend **fe, struct channel_info *tsin, int chan_num) { struct tda18212_config *tda18212; - struct stv6110x_devctl *fe2; + const struct stv6110x_devctl *fe2; struct i2c_client *client; struct i2c_board_info tda18212_info = { .type = "tda18212", diff --git 
a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile index be680f839e77..e236059a60ad 100644 --- a/drivers/media/platform/ti-vpe/Makefile +++ b/drivers/media/platform/ti-vpe/Makefile @@ -3,3 +3,7 @@ obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o ti-vpe-y := vpe.o sc.o csc.o vpdma.o ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG + +obj-$(CONFIG_VIDEO_TI_CAL) += ti-cal.o + +ti-cal-y := cal.o diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c new file mode 100644 index 000000000000..35fa1071c5b2 --- /dev/null +++ b/drivers/media/platform/ti-vpe/cal.c @@ -0,0 +1,1971 @@ +/* + * TI CAL camera interface driver + * + * Copyright (c) 2015 Texas Instruments Inc. + * Benoit Parrot, <bparrot@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation + */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/ioctl.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/videodev2.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> + +#include <media/v4l2-of.h> +#include <media/v4l2-async.h> +#include <media/v4l2-common.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> +#include <media/v4l2-event.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-fh.h> +#include <media/v4l2-event.h> +#include <media/v4l2-common.h> +#include <media/videobuf2-core.h> +#include <media/videobuf2-dma-contig.h> +#include "cal_regs.h" + +#define CAL_MODULE_NAME "cal" + +#define MAX_WIDTH 1920 +#define MAX_HEIGHT 1200 + +#define CAL_VERSION "0.1.0" + +MODULE_DESCRIPTION("TI CAL driver"); +MODULE_AUTHOR("Benoit Parrot, <bparrot@ti.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(CAL_VERSION); + +static unsigned video_nr = -1; +module_param(video_nr, uint, 0644); +MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect"); + +static unsigned debug; +module_param(debug, uint, 0644); +MODULE_PARM_DESC(debug, "activates debug info"); + +/* timeperframe: min/max and default */ +static const struct v4l2_fract + tpf_default = {.numerator = 1001, .denominator = 30000}; + +#define cal_dbg(level, caldev, fmt, arg...) \ + v4l2_dbg(level, debug, &caldev->v4l2_dev, fmt, ##arg) +#define cal_info(caldev, fmt, arg...) \ + v4l2_info(&caldev->v4l2_dev, fmt, ##arg) +#define cal_err(caldev, fmt, arg...) \ + v4l2_err(&caldev->v4l2_dev, fmt, ##arg) + +#define ctx_dbg(level, ctx, fmt, arg...) \ + v4l2_dbg(level, debug, &ctx->v4l2_dev, fmt, ##arg) +#define ctx_info(ctx, fmt, arg...) \ + v4l2_info(&ctx->v4l2_dev, fmt, ##arg) +#define ctx_err(ctx, fmt, arg...) 
\ + v4l2_err(&ctx->v4l2_dev, fmt, ##arg) + +#define CAL_NUM_INPUT 1 +#define CAL_NUM_CONTEXT 2 + +#define bytes_per_line(pixel, bpp) (ALIGN(pixel * bpp, 16)) + +#define reg_read(dev, offset) ioread32(dev->base + offset) +#define reg_write(dev, offset, val) iowrite32(val, dev->base + offset) + +#define reg_read_field(dev, offset, mask) get_field(reg_read(dev, offset), \ + mask) +#define reg_write_field(dev, offset, field, mask) { \ + u32 val = reg_read(dev, offset); \ + set_field(&val, field, mask); \ + reg_write(dev, offset, val); } + +/* ------------------------------------------------------------------ + * Basic structures + * ------------------------------------------------------------------ + */ + +struct cal_fmt { + u32 fourcc; + u32 code; + u8 depth; +}; + +static struct cal_fmt cal_formats[] = { + { + .fourcc = V4L2_PIX_FMT_YUYV, + .code = MEDIA_BUS_FMT_YUYV8_2X8, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_UYVY, + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_YVYU, + .code = MEDIA_BUS_FMT_YVYU8_2X8, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_VYUY, + .code = MEDIA_BUS_FMT_VYUY8_2X8, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */ + .code = MEDIA_BUS_FMT_RGB565_2X8_LE, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */ + .code = MEDIA_BUS_FMT_RGB565_2X8_BE, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */ + .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */ + .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */ + .code = MEDIA_BUS_FMT_RGB888_2X12_LE, + .depth = 24, + }, { + .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */ + .code = MEDIA_BUS_FMT_RGB888_2X12_BE, + .depth = 24, + }, { + .fourcc = V4L2_PIX_FMT_RGB32, /* argb */ + .code = MEDIA_BUS_FMT_ARGB8888_1X32, + .depth = 32, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR8, + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .depth = 8, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG8, + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .depth = 8, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG8, + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .depth = 8, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB8, + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .depth = 8, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR10, + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG10, + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG10, + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB10, + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR12, + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG12, + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG12, + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .depth = 16, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB12, + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .depth = 16, + }, +}; + +/* Print Four-character-code (FOURCC) */ +static char *fourcc_to_str(u32 fmt) +{ + static char code[5]; + + code[0] = (unsigned char)(fmt & 0xff); + code[1] = (unsigned char)((fmt >> 8) & 0xff); + code[2] = (unsigned char)((fmt >> 16) & 0xff); + code[3] = (unsigned char)((fmt >> 24) & 0xff); + code[4] = '\0'; + + return code; +} + +/* buffer for one video frame */ +struct cal_buffer { + /* common v4l buffer stuff -- must be first */ + struct vb2_v4l2_buffer vb; + 
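The bytes_per_line() macro defined above pads every line out to a 16-byte boundary, and the depth field in cal_formats is in bits, so callers pass depth >> 3 as bpp. A small userspace check of the arithmetic (ALIGN() is a local stand-in for the kernel macro; the widths are hypothetical):

	#include <stdio.h>

	#define ALIGN(x, a)			(((x) + (a) - 1) & ~((a) - 1))
	#define bytes_per_line(pixel, bpp)	(ALIGN((pixel) * (bpp), 16))

	int main(void)
	{
		/* 1280 YUYV pixels, 16 bits -> 2 bytes each: already aligned */
		printf("%d\n", bytes_per_line(1280, 2));	/* 2560 */
		/* 642 SBGGR8 pixels, 1 byte each: padded up to 656 */
		printf("%d\n", bytes_per_line(642, 1));		/* 656 */
		return 0;
	}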
struct list_head list; + const struct cal_fmt *fmt; +}; + +struct cal_dmaqueue { + struct list_head active; + + /* Counters to control fps rate */ + int frame; + int ini_jiffies; +}; + +struct cm_data { + void __iomem *base; + struct resource *res; + + unsigned int camerrx_control; + + struct platform_device *pdev; +}; + +struct cc_data { + void __iomem *base; + struct resource *res; + + struct platform_device *pdev; +}; + +/* + * there is one cal_dev structure in the driver, it is shared by + * all instances. + */ +struct cal_dev { + int irq; + void __iomem *base; + struct resource *res; + struct platform_device *pdev; + struct v4l2_device v4l2_dev; + + /* Control Module handle */ + struct cm_data *cm; + /* Camera Core Module handle */ + struct cc_data *cc[CAL_NUM_CSI2_PORTS]; + + struct cal_ctx *ctx[CAL_NUM_CONTEXT]; +}; + +/* + * There is one cal_ctx structure for each camera core context. + */ +struct cal_ctx { + struct v4l2_device v4l2_dev; + struct v4l2_ctrl_handler ctrl_handler; + struct video_device vdev; + struct v4l2_async_notifier notifier; + struct v4l2_subdev *sensor; + struct v4l2_of_endpoint endpoint; + + struct v4l2_async_subdev asd; + struct v4l2_async_subdev *asd_list[1]; + + struct v4l2_fh fh; + struct cal_dev *dev; + struct cc_data *cc; + + /* v4l2_ioctl mutex */ + struct mutex mutex; + /* v4l2 buffers lock */ + spinlock_t slock; + + /* Several counters */ + unsigned long jiffies; + + struct vb2_alloc_ctx *alloc_ctx; + struct cal_dmaqueue vidq; + + /* Input Number */ + int input; + + /* video capture */ + const struct cal_fmt *fmt; + /* Used to store current pixel format */ + struct v4l2_format v_fmt; + /* Used to store current mbus frame format */ + struct v4l2_mbus_framefmt m_fmt; + + /* Current subdev enumerated format */ + struct cal_fmt *active_fmt[ARRAY_SIZE(cal_formats)]; + int num_active_fmt; + + struct v4l2_fract timeperframe; + unsigned int sequence; + unsigned int external_rate; + struct vb2_queue vb_vidq; + unsigned int seq_count; + unsigned int csi2_port; + unsigned int virtual_channel; + + /* Pointer pointing to current v4l2_buffer */ + struct cal_buffer *cur_frm; + /* Pointer pointing to next v4l2_buffer */ + struct cal_buffer *next_frm; +}; + +static const struct cal_fmt *find_format_by_pix(struct cal_ctx *ctx, + u32 pixelformat) +{ + const struct cal_fmt *fmt; + unsigned int k; + + for (k = 0; k < ctx->num_active_fmt; k++) { + fmt = ctx->active_fmt[k]; + if (fmt->fourcc == pixelformat) + return fmt; + } + + return NULL; +} + +static const struct cal_fmt *find_format_by_code(struct cal_ctx *ctx, + u32 code) +{ + const struct cal_fmt *fmt; + unsigned int k; + + for (k = 0; k < ctx->num_active_fmt; k++) { + fmt = ctx->active_fmt[k]; + if (fmt->code == code) + return fmt; + } + + return NULL; +} + +static inline struct cal_ctx *notifier_to_ctx(struct v4l2_async_notifier *n) +{ + return container_of(n, struct cal_ctx, notifier); +} + +static inline int get_field(u32 value, u32 mask) +{ + return (value & mask) >> __ffs(mask); +} + +static inline void set_field(u32 *valp, u32 field, u32 mask) +{ + u32 val = *valp; + + val &= ~mask; + val |= (field << __ffs(mask)) & mask; + *valp = val; +} + +/* + * Control Module block access + */ +static struct cm_data *cm_create(struct cal_dev *dev) +{ + struct platform_device *pdev = dev->pdev; + struct cm_data *cm; + + cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL); + if (!cm) + return ERR_PTR(-ENOMEM); + + cm->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "camerrx_control"); + cm->base = 
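get_field() and set_field() above use __ffs(mask), the index of the lowest set bit, to shift a value into or out of any contiguous bit mask; every register helper in this driver builds on them. A worked example in userspace (__builtin_ctz() stands in for the kernel's __ffs(); the register value is hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	static inline unsigned int my_ffs(uint32_t mask)
	{
		return __builtin_ctz(mask);	/* lowest set bit, like __ffs() */
	}

	static inline uint32_t get_field(uint32_t value, uint32_t mask)
	{
		return (value & mask) >> my_ffs(mask);
	}

	static inline void set_field(uint32_t *valp, uint32_t field, uint32_t mask)
	{
		uint32_t val = *valp;

		val &= ~mask;
		val |= (field << my_ffs(mask)) & mask;
		*valp = val;
	}

	int main(void)
	{
		uint32_t reg = 0x12345678;

		set_field(&reg, 0x3, 0x000000F0);	/* bits [7:4] := 3 */
		printf("reg=0x%08x field=%u\n", reg,
		       get_field(reg, 0x000000F0));	/* reg=0x12345638 field=3 */
		return 0;
	}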
devm_ioremap_resource(&pdev->dev, cm->res); + if (IS_ERR(cm->base)) { + cal_err(dev, "failed to ioremap\n"); + return ERR_CAST(cm->base); + } + + cal_dbg(1, dev, "ioresource %s at %pa - %pa\n", + cm->res->name, &cm->res->start, &cm->res->end); + + return cm; +} + +static void camerarx_phy_enable(struct cal_ctx *ctx) +{ + u32 val; + + if (!ctx->dev->cm->base) { + ctx_err(ctx, "cm not mapped\n"); + return; + } + + val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL); + if (ctx->csi2_port == 1) { + set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK); + set_field(&val, 0, CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK); + /* enable all lanes by default */ + set_field(&val, 0xf, CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK); + set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_MODE_MASK); + } else if (ctx->csi2_port == 2) { + set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK); + set_field(&val, 0, CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK); + /* enable all lanes by default */ + set_field(&val, 0x3, CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK); + set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_MODE_MASK); + } + reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val); +} + +static void camerarx_phy_disable(struct cal_ctx *ctx) +{ + u32 val; + + if (!ctx->dev->cm->base) { + ctx_err(ctx, "cm not mapped\n"); + return; + } + + val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL); + if (ctx->csi2_port == 1) + set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK); + else if (ctx->csi2_port == 2) + set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK); + reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val); +} + +/* + * Camera Instance access block + */ +static struct cc_data *cc_create(struct cal_dev *dev, unsigned int core) +{ + struct platform_device *pdev = dev->pdev; + struct cc_data *cc; + + cc = devm_kzalloc(&pdev->dev, sizeof(*cc), GFP_KERNEL); + if (!cc) + return ERR_PTR(-ENOMEM); + + cc->res = platform_get_resource_byname(pdev, + IORESOURCE_MEM, + (core == 0) ? 
+ "cal_rx_core0" : + "cal_rx_core1"); + cc->base = devm_ioremap_resource(&pdev->dev, cc->res); + if (IS_ERR(cc->base)) { + cal_err(dev, "failed to ioremap\n"); + return ERR_CAST(cc->base); + } + + cal_dbg(1, dev, "ioresource %s at %pa - %pa\n", + cc->res->name, &cc->res->start, &cc->res->end); + + return cc; +} + +/* + * Get Revision and HW info + */ +static void cal_get_hwinfo(struct cal_dev *dev) +{ + u32 revision = 0; + u32 hwinfo = 0; + + revision = reg_read(dev, CAL_HL_REVISION); + cal_dbg(3, dev, "CAL_HL_REVISION = 0x%08x (expecting 0x40000200)\n", + revision); + + hwinfo = reg_read(dev, CAL_HL_HWINFO); + cal_dbg(3, dev, "CAL_HL_HWINFO = 0x%08x (expecting 0xA3C90469)\n", + hwinfo); +} + +static inline int cal_runtime_get(struct cal_dev *dev) +{ + int r; + + r = pm_runtime_get_sync(&dev->pdev->dev); + + return r; +} + +static inline void cal_runtime_put(struct cal_dev *dev) +{ + pm_runtime_put_sync(&dev->pdev->dev); +} + +static void cal_quickdump_regs(struct cal_dev *dev) +{ + cal_info(dev, "CAL Registers @ 0x%pa:\n", &dev->res->start); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4, + (__force const void *)dev->base, + resource_size(dev->res), false); + + if (dev->ctx[0]) { + cal_info(dev, "CSI2 Core 0 Registers @ %pa:\n", + &dev->ctx[0]->cc->res->start); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4, + (__force const void *)dev->ctx[0]->cc->base, + resource_size(dev->ctx[0]->cc->res), + false); + } + + if (dev->ctx[1]) { + cal_info(dev, "CSI2 Core 1 Registers @ %pa:\n", + &dev->ctx[1]->cc->res->start); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4, + (__force const void *)dev->ctx[1]->cc->base, + resource_size(dev->ctx[1]->cc->res), + false); + } + + cal_info(dev, "CAMERRX_Control Registers @ %pa:\n", + &dev->cm->res->start); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4, + (__force const void *)dev->cm->base, + resource_size(dev->cm->res), false); +} + +/* + * Enable the expected IRQ sources + */ +static void enable_irqs(struct cal_ctx *ctx) +{ + /* Enable IRQ_WDMA_END 0/1 */ + reg_write_field(ctx->dev, + CAL_HL_IRQENABLE_SET(2), + CAL_HL_IRQ_ENABLE, + CAL_HL_IRQ_MASK(ctx->csi2_port)); + /* Enable IRQ_WDMA_START 0/1 */ + reg_write_field(ctx->dev, + CAL_HL_IRQENABLE_SET(3), + CAL_HL_IRQ_ENABLE, + CAL_HL_IRQ_MASK(ctx->csi2_port)); + /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */ + reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0xFF000000); +} + +static void disable_irqs(struct cal_ctx *ctx) +{ + /* Disable IRQ_WDMA_END 0/1 */ + reg_write_field(ctx->dev, + CAL_HL_IRQENABLE_CLR(2), + CAL_HL_IRQ_CLEAR, + CAL_HL_IRQ_MASK(ctx->csi2_port)); + /* Disable IRQ_WDMA_START 0/1 */ + reg_write_field(ctx->dev, + CAL_HL_IRQENABLE_CLR(3), + CAL_HL_IRQ_CLEAR, + CAL_HL_IRQ_MASK(ctx->csi2_port)); + /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */ + reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0); +} + +static void csi2_init(struct cal_ctx *ctx) +{ + int i; + u32 val; + + val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)); + set_field(&val, CAL_GEN_ENABLE, + CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK); + set_field(&val, CAL_GEN_ENABLE, + CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK); + set_field(&val, CAL_GEN_DISABLE, + CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK); + set_field(&val, 407, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK); + reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val); + ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x\n", ctx->csi2_port, + reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port))); + + val = reg_read(ctx->dev, 
CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)); + set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL, + CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK); + set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON, + CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK); + reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val); + for (i = 0; i < 10; i++) { + if (reg_read_field(ctx->dev, + CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), + CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) == + CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON) + break; + usleep_range(1000, 1100); + } + ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", ctx->csi2_port, + reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port))); + + val = reg_read(ctx->dev, CAL_CTRL); + set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK); + set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK); + set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED, + CAL_CTRL_POSTED_WRITES_MASK); + set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK); + set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK); + reg_write(ctx->dev, CAL_CTRL, val); + ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL)); +} + +static void csi2_lane_config(struct cal_ctx *ctx) +{ + u32 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)); + u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK; + u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK; + struct v4l2_of_bus_mipi_csi2 *mipi_csi2 = &ctx->endpoint.bus.mipi_csi2; + int lane; + + set_field(&val, mipi_csi2->clock_lane + 1, lane_mask); + set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask); + for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) { + /* + * Every lane are one nibble apart starting with the + * clock followed by the data lanes so shift masks by 4. + */ + lane_mask <<= 4; + polarity_mask <<= 4; + set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask); + set_field(&val, mipi_csi2->lane_polarities[lane + 1], + polarity_mask); + } + + reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val); + ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", + ctx->csi2_port, val); +} + +static void csi2_ppi_enable(struct cal_ctx *ctx) +{ + reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port), + CAL_GEN_ENABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK); +} + +static void csi2_ppi_disable(struct cal_ctx *ctx) +{ + reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port), + CAL_GEN_DISABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK); +} + +static void csi2_ctx_config(struct cal_ctx *ctx) +{ + u32 val; + + val = reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port)); + set_field(&val, ctx->csi2_port, CAL_CSI2_CTX_CPORT_MASK); + /* + * DT type: MIPI CSI-2 Specs + * 0x1: All - DT filter is disabled + * 0x24: RGB888 1 pixel = 3 bytes + * 0x2B: RAW10 4 pixels = 5 bytes + * 0x2A: RAW8 1 pixel = 1 byte + * 0x1E: YUV422 2 pixels = 4 bytes + */ + set_field(&val, 0x1, CAL_CSI2_CTX_DT_MASK); + /* Virtual Channel from the CSI2 sensor usually 0! 
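The DT comment above quotes the MIPI CSI-2 packing rules; as a quick sanity check of what they mean for line sizes on the serial bus (the 1280-pixel-wide line is a hypothetical example):

	RGB888 (0x24): 1 pixel  = 3 bytes  ->  1280 * 3     = 3840 bytes/line
	RAW10  (0x2B): 4 pixels = 5 bytes  ->  1280 / 4 * 5 = 1600 bytes/line
	RAW8   (0x2A): 1 pixel  = 1 byte   ->  1280 * 1     = 1280 bytes/line
	YUV422 (0x1E): 2 pixels = 4 bytes  ->  1280 / 2 * 4 = 2560 bytes/line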
*/ + set_field(&val, ctx->virtual_channel, CAL_CSI2_CTX_VC_MASK); + /* NUM_LINES_PER_FRAME => 0 means auto detect */ + set_field(&val, 0, CAL_CSI2_CTX_LINES_MASK); + set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK); + set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE, + CAL_CSI2_CTX_PACK_MODE_MASK); + reg_write(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port), val); + ctx_dbg(3, ctx, "CAL_CSI2_CTX0(%d) = 0x%08x\n", ctx->csi2_port, + reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port))); +} + +static void pix_proc_config(struct cal_ctx *ctx) +{ + u32 val; + + val = reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port)); + set_field(&val, CAL_PIX_PROC_EXTRACT_B8, CAL_PIX_PROC_EXTRACT_MASK); + set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK); + set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK); + set_field(&val, CAL_PIX_PROC_PACK_B8, CAL_PIX_PROC_PACK_MASK); + set_field(&val, ctx->csi2_port, CAL_PIX_PROC_CPORT_MASK); + set_field(&val, CAL_GEN_ENABLE, CAL_PIX_PROC_EN_MASK); + reg_write(ctx->dev, CAL_PIX_PROC(ctx->csi2_port), val); + ctx_dbg(3, ctx, "CAL_PIX_PROC(%d) = 0x%08x\n", ctx->csi2_port, + reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port))); +} + +static void cal_wr_dma_config(struct cal_ctx *ctx, + unsigned int width) +{ + u32 val; + + val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)); + set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK); + set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT, + CAL_WR_DMA_CTRL_DTAG_MASK); + set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST, + CAL_WR_DMA_CTRL_MODE_MASK); + set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR, + CAL_WR_DMA_CTRL_PATTERN_MASK); + set_field(&val, CAL_GEN_ENABLE, CAL_WR_DMA_CTRL_STALL_RD_MASK); + reg_write(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port), val); + ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->csi2_port, + reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port))); + + /* + * width/16 not sure but giving it a whirl. + * zero does not work right + */ + reg_write_field(ctx->dev, + CAL_WR_DMA_OFST(ctx->csi2_port), + (width / 16), + CAL_WR_DMA_OFST_MASK); + ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->csi2_port, + reg_read(ctx->dev, CAL_WR_DMA_OFST(ctx->csi2_port))); + + val = reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port)); + /* 64 bit word means no skipping */ + set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK); + /* + * (width*8)/64 this should be size of an entire line + * in 64bit word but 0 means all data until the end + * is detected automagically + */ + set_field(&val, (width / 8), CAL_WR_DMA_XSIZE_MASK); + reg_write(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port), val); + ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->csi2_port, + reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port))); +} + +static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr) +{ + reg_write(ctx->dev, CAL_WR_DMA_ADDR(ctx->csi2_port), dmaaddr); +} + +/* + * TCLK values are OK at their reset values + */ +#define TCLK_TERM 0 +#define TCLK_MISS 1 +#define TCLK_SETTLE 14 +#define THS_SETTLE 15 + +static void csi2_phy_config(struct cal_ctx *ctx) +{ + unsigned int reg0, reg1; + unsigned int ths_term, ths_settle; + unsigned int ddrclkperiod_us; + + /* + * THS_TERM: Programmed value = floor(20 ns/DDRClk period) - 2. + */ + ddrclkperiod_us = ctx->external_rate / 2000000; + ddrclkperiod_us = 1000000 / ddrclkperiod_us; + ctx_dbg(1, ctx, "ddrclkperiod_us: %d\n", ddrclkperiod_us); + + ths_term = 20000 / ddrclkperiod_us; + ths_term = (ths_term >= 2) ? 
ths_term - 2 : ths_term; + ctx_dbg(1, ctx, "ths_term: %d (0x%02x)\n", ths_term, ths_term); + + /* + * THS_SETTLE: Programmed value = floor(176.3 ns/CtrlClk period) - 1. + * Since CtrlClk is fixed at 96Mhz then we get + * ths_settle = floor(176.3 / 10.416) - 1 = 15 + * If we ever switch to a dynamic clock then this code might be useful + * + * unsigned int ctrlclkperiod_us; + * ctrlclkperiod_us = 96000000 / 1000000; + * ctrlclkperiod_us = 1000000 / ctrlclkperiod_us; + * ctx_dbg(1, ctx, "ctrlclkperiod_us: %d\n", ctrlclkperiod_us); + + * ths_settle = 176300 / ctrlclkperiod_us; + * ths_settle = (ths_settle > 1) ? ths_settle - 1 : ths_settle; + */ + + ths_settle = THS_SETTLE; + ctx_dbg(1, ctx, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle); + + reg0 = reg_read(ctx->cc, CAL_CSI2_PHY_REG0); + set_field(®0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE, + CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK); + set_field(®0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK); + set_field(®0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK); + + ctx_dbg(1, ctx, "CSI2_%d_REG0 = 0x%08x\n", (ctx->csi2_port - 1), reg0); + reg_write(ctx->cc, CAL_CSI2_PHY_REG0, reg0); + + reg1 = reg_read(ctx->cc, CAL_CSI2_PHY_REG1); + set_field(®1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK); + set_field(®1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK); + set_field(®1, TCLK_MISS, CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK); + set_field(®1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK); + + ctx_dbg(1, ctx, "CSI2_%d_REG1 = 0x%08x\n", (ctx->csi2_port - 1), reg1); + reg_write(ctx->cc, CAL_CSI2_PHY_REG1, reg1); +} + +static int cal_get_external_info(struct cal_ctx *ctx) +{ + struct v4l2_ctrl *ctrl; + + ctrl = v4l2_ctrl_find(ctx->sensor->ctrl_handler, V4L2_CID_PIXEL_RATE); + if (!ctrl) { + ctx_err(ctx, "no pixel rate control in subdev: %s\n", + ctx->sensor->name); + return -EPIPE; + } + + ctx->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl); + ctx_dbg(3, ctx, "sensor Pixel Rate: %d\n", ctx->external_rate); + + return 0; +} + +static inline void cal_schedule_next_buffer(struct cal_ctx *ctx) +{ + struct cal_dmaqueue *dma_q = &ctx->vidq; + struct cal_buffer *buf; + unsigned long addr; + + buf = list_entry(dma_q->active.next, struct cal_buffer, list); + ctx->next_frm = buf; + list_del(&buf->list); + + addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0); + cal_wr_dma_addr(ctx, addr); +} + +static inline void cal_process_buffer_complete(struct cal_ctx *ctx) +{ + ctx->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns(); + ctx->cur_frm->vb.field = ctx->m_fmt.field; + ctx->cur_frm->vb.sequence = ctx->sequence++; + + vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE); + ctx->cur_frm = ctx->next_frm; +} + +#define isvcirqset(irq, vc, ff) (irq & \ + (CAL_CSI2_VC_IRQENABLE_ ##ff ##_IRQ_##vc ##_MASK)) + +#define isportirqset(irq, port) (irq & CAL_HL_IRQ_MASK(port)) + +static irqreturn_t cal_irq(int irq_cal, void *data) +{ + struct cal_dev *dev = (struct cal_dev *)data; + struct cal_ctx *ctx; + struct cal_dmaqueue *dma_q; + u32 irqst2, irqst3; + + /* Check which DMA just finished */ + irqst2 = reg_read(dev, CAL_HL_IRQSTATUS(2)); + if (irqst2) { + /* Clear Interrupt status */ + reg_write(dev, CAL_HL_IRQSTATUS(2), irqst2); + + /* Need to check both port */ + if (isportirqset(irqst2, 1)) { + ctx = dev->ctx[0]; + + if (ctx->cur_frm != ctx->next_frm) + cal_process_buffer_complete(ctx); + } + + if (isportirqset(irqst2, 2)) { + ctx = dev->ctx[1]; + + if (ctx->cur_frm != ctx->next_frm) + cal_process_buffer_complete(ctx); + } + } + + /* Check which DMA just 
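To make the integer arithmetic in csi2_phy_config() above concrete, the same computation is repeated below with a hypothetical 400 MHz sensor pixel rate (giving a 200 MHz CSI-2 DDR clock); the rate is an assumption for illustration only, and the intermediate value works out to picoseconds despite the _us suffix used in the driver:

	#include <stdio.h>

	int main(void)
	{
		unsigned int external_rate = 400000000;	/* hypothetical pixel rate */
		unsigned int ddrclkperiod, ths_term;

		ddrclkperiod = external_rate / 2000000;	/* DDR clock in MHz: 200 */
		ddrclkperiod = 1000000 / ddrclkperiod;	/* period: 5000 ps = 5 ns */

		ths_term = 20000 / ddrclkperiod;	/* floor(20 ns / 5 ns) = 4 */
		ths_term = (ths_term >= 2) ? ths_term - 2 : ths_term;

		printf("THS_TERM = %u\n", ths_term);	/* 2 */
		/* THS_SETTLE stays at the fixed 15, since CtrlClk is 96 MHz. */
		return 0;
	}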
started */ + irqst3 = reg_read(dev, CAL_HL_IRQSTATUS(3)); + if (irqst3) { + /* Clear Interrupt status */ + reg_write(dev, CAL_HL_IRQSTATUS(3), irqst3); + + /* Need to check both port */ + if (isportirqset(irqst3, 1)) { + ctx = dev->ctx[0]; + dma_q = &ctx->vidq; + + spin_lock(&ctx->slock); + if (!list_empty(&dma_q->active) && + ctx->cur_frm == ctx->next_frm) + cal_schedule_next_buffer(ctx); + spin_unlock(&ctx->slock); + } + + if (isportirqset(irqst3, 2)) { + ctx = dev->ctx[1]; + dma_q = &ctx->vidq; + + spin_lock(&ctx->slock); + if (!list_empty(&dma_q->active) && + ctx->cur_frm == ctx->next_frm) + cal_schedule_next_buffer(ctx); + spin_unlock(&ctx->slock); + } + } + + return IRQ_HANDLED; +} + +/* + * video ioctls + */ +static int cal_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + struct cal_ctx *ctx = video_drvdata(file); + + strlcpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver)); + strlcpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card)); + + snprintf(cap->bus_info, sizeof(cap->bus_info), + "platform:%s", ctx->v4l2_dev.name); + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | + V4L2_CAP_READWRITE; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; + return 0; +} + +static int cal_enum_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_fmt *fmt = NULL; + + if (f->index >= ctx->num_active_fmt) + return -EINVAL; + + fmt = ctx->active_fmt[f->index]; + + f->pixelformat = fmt->fourcc; + f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + return 0; +} + +static int __subdev_get_format(struct cal_ctx *ctx, + struct v4l2_mbus_framefmt *fmt) +{ + struct v4l2_subdev_format sd_fmt; + struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format; + int ret; + + if (!ctx->sensor) + return -EINVAL; + + sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; + sd_fmt.pad = 0; + + ret = v4l2_subdev_call(ctx->sensor, pad, get_fmt, NULL, &sd_fmt); + if (ret) + return ret; + + *fmt = *mbus_fmt; + + ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__, + fmt->width, fmt->height, fmt->code); + + return 0; +} + +static int __subdev_set_format(struct cal_ctx *ctx, + struct v4l2_mbus_framefmt *fmt) +{ + struct v4l2_subdev_format sd_fmt; + struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format; + int ret; + + if (!ctx->sensor) + return -EINVAL; + + sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; + sd_fmt.pad = 0; + *mbus_fmt = *fmt; + + ret = v4l2_subdev_call(ctx->sensor, pad, set_fmt, NULL, &sd_fmt); + if (ret) + return ret; + + ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__, + fmt->width, fmt->height, fmt->code); + + return 0; +} + +static int cal_calc_format_size(struct cal_ctx *ctx, + const struct cal_fmt *fmt, + struct v4l2_format *f) +{ + if (!fmt) { + ctx_dbg(3, ctx, "No cal_fmt provided!\n"); + return -EINVAL; + } + + v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2, + &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0); + f->fmt.pix.bytesperline = bytes_per_line(f->fmt.pix.width, + fmt->depth >> 3); + f->fmt.pix.sizeimage = f->fmt.pix.height * + f->fmt.pix.bytesperline; + + ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n", + __func__, fourcc_to_str(f->fmt.pix.pixelformat), + f->fmt.pix.width, f->fmt.pix.height, + f->fmt.pix.bytesperline, f->fmt.pix.sizeimage); + + return 0; +} + +static int cal_g_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + + *f = ctx->v_fmt; + + return 0; +} + +static int cal_try_fmt_vid_cap(struct file 
*file, void *priv, + struct v4l2_format *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_fmt *fmt; + struct v4l2_subdev_frame_size_enum fse; + int ret, found; + + fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat); + if (!fmt) { + ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n", + f->fmt.pix.pixelformat); + + /* Just get the first one enumerated */ + fmt = ctx->active_fmt[0]; + f->fmt.pix.pixelformat = fmt->fourcc; + } + + f->fmt.pix.field = ctx->v_fmt.fmt.pix.field; + + /* check for/find a valid width/height */ + ret = 0; + found = false; + fse.pad = 0; + fse.code = fmt->code; + fse.which = V4L2_SUBDEV_FORMAT_ACTIVE; + for (fse.index = 0; ; fse.index++) { + ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, + NULL, &fse); + if (ret) + break; + + if ((f->fmt.pix.width == fse.max_width) && + (f->fmt.pix.height == fse.max_height)) { + found = true; + break; + } else if ((f->fmt.pix.width >= fse.min_width) && + (f->fmt.pix.width <= fse.max_width) && + (f->fmt.pix.height >= fse.min_height) && + (f->fmt.pix.height <= fse.max_height)) { + found = true; + break; + } + } + + if (!found) { + /* use existing values as default */ + f->fmt.pix.width = ctx->v_fmt.fmt.pix.width; + f->fmt.pix.height = ctx->v_fmt.fmt.pix.height; + } + + /* + * Use current colorspace for now, it will get + * updated properly during s_fmt + */ + f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace; + return cal_calc_format_size(ctx, fmt, f); +} + +static int cal_s_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + struct vb2_queue *q = &ctx->vb_vidq; + const struct cal_fmt *fmt; + struct v4l2_mbus_framefmt mbus_fmt; + int ret; + + if (vb2_is_busy(q)) { + ctx_dbg(3, ctx, "%s device busy\n", __func__); + return -EBUSY; + } + + ret = cal_try_fmt_vid_cap(file, priv, f); + if (ret < 0) + return ret; + + fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat); + + v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, fmt->code); + + ret = __subdev_set_format(ctx, &mbus_fmt); + if (ret) + return ret; + + /* Just double check nothing has gone wrong */ + if (mbus_fmt.code != fmt->code) { + ctx_dbg(3, ctx, + "%s subdev changed format on us, this should not happen\n", + __func__); + return -EINVAL; + } + + v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt); + ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc; + cal_calc_format_size(ctx, fmt, &ctx->v_fmt); + ctx->fmt = fmt; + ctx->m_fmt = mbus_fmt; + *f = ctx->v_fmt; + + return 0; +} + +static int cal_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_fmt *fmt; + struct v4l2_subdev_frame_size_enum fse; + int ret; + + /* check for valid format */ + fmt = find_format_by_pix(ctx, fsize->pixel_format); + if (!fmt) { + ctx_dbg(3, ctx, "Invalid pixel code: %x\n", + fsize->pixel_format); + return -EINVAL; + } + + fse.index = fsize->index; + fse.pad = 0; + fse.code = fmt->code; + + ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, NULL, &fse); + if (ret) + return -EINVAL; + + ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n", + __func__, fse.index, fse.code, fse.min_width, fse.max_width, + fse.min_height, fse.max_height); + + fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; + fsize->discrete.width = fse.max_width; + fsize->discrete.height = fse.max_height; + + return 0; +} + +static int cal_enum_input(struct file *file, void *priv, + struct 
v4l2_input *inp) +{ + if (inp->index >= CAL_NUM_INPUT) + return -EINVAL; + + inp->type = V4L2_INPUT_TYPE_CAMERA; + sprintf(inp->name, "Camera %u", inp->index); + return 0; +} + +static int cal_g_input(struct file *file, void *priv, unsigned int *i) +{ + struct cal_ctx *ctx = video_drvdata(file); + + *i = ctx->input; + return 0; +} + +static int cal_s_input(struct file *file, void *priv, unsigned int i) +{ + struct cal_ctx *ctx = video_drvdata(file); + + if (i >= CAL_NUM_INPUT) + return -EINVAL; + + ctx->input = i; + return 0; +} + +/* timeperframe is arbitrary and continuous */ +static int cal_enum_frameintervals(struct file *file, void *priv, + struct v4l2_frmivalenum *fival) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_fmt *fmt; + struct v4l2_subdev_frame_size_enum fse; + int ret; + + if (fival->index) + return -EINVAL; + + fmt = find_format_by_pix(ctx, fival->pixel_format); + if (!fmt) + return -EINVAL; + + /* check for valid width/height */ + ret = 0; + fse.pad = 0; + fse.code = fmt->code; + fse.which = V4L2_SUBDEV_FORMAT_ACTIVE; + for (fse.index = 0; ; fse.index++) { + ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, + NULL, &fse); + if (ret) + return -EINVAL; + + if ((fival->width == fse.max_width) && + (fival->height == fse.max_height)) + break; + else if ((fival->width >= fse.min_width) && + (fival->width <= fse.max_width) && + (fival->height >= fse.min_height) && + (fival->height <= fse.max_height)) + break; + + return -EINVAL; + } + + fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; + fival->discrete.numerator = 1; + fival->discrete.denominator = 30; + + return 0; +} + +/* + * Videobuf operations + */ +static int cal_queue_setup(struct vb2_queue *vq, + unsigned int *nbuffers, unsigned int *nplanes, + unsigned int sizes[], void *alloc_ctxs[]) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vq); + unsigned size = ctx->v_fmt.fmt.pix.sizeimage; + + if (vq->num_buffers + *nbuffers < 3) + *nbuffers = 3 - vq->num_buffers; + alloc_ctxs[0] = ctx->alloc_ctx; + + if (*nplanes) { + if (sizes[0] < size) + return -EINVAL; + size = sizes[0]; + } + + *nplanes = 1; + sizes[0] = size; + + ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]); + + return 0; +} + +static int cal_buffer_prepare(struct vb2_buffer *vb) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct cal_buffer *buf = container_of(vb, struct cal_buffer, + vb.vb2_buf); + unsigned long size; + + if (WARN_ON(!ctx->fmt)) + return -EINVAL; + + size = ctx->v_fmt.fmt.pix.sizeimage; + if (vb2_plane_size(vb, 0) < size) { + ctx_err(ctx, + "data will not fit into plane (%lu < %lu)\n", + vb2_plane_size(vb, 0), size); + return -EINVAL; + } + + vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size); + return 0; +} + +static void cal_buffer_queue(struct vb2_buffer *vb) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct cal_buffer *buf = container_of(vb, struct cal_buffer, + vb.vb2_buf); + struct cal_dmaqueue *vidq = &ctx->vidq; + unsigned long flags = 0; + + /* recheck locking */ + spin_lock_irqsave(&ctx->slock, flags); + list_add_tail(&buf->list, &vidq->active); + spin_unlock_irqrestore(&ctx->slock, flags); +} + +static int cal_start_streaming(struct vb2_queue *vq, unsigned int count) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vq); + struct cal_dmaqueue *dma_q = &ctx->vidq; + struct cal_buffer *buf, *tmp; + unsigned long addr = 0; + unsigned long flags; + int ret; + + spin_lock_irqsave(&ctx->slock, flags); + if (list_empty(&dma_q->active)) { + spin_unlock_irqrestore(&ctx->slock, flags); + 
ctx_dbg(3, ctx, "buffer queue is empty\n"); + return -EIO; + } + + buf = list_entry(dma_q->active.next, struct cal_buffer, list); + ctx->cur_frm = buf; + ctx->next_frm = buf; + list_del(&buf->list); + spin_unlock_irqrestore(&ctx->slock, flags); + + addr = vb2_dma_contig_plane_dma_addr(&ctx->cur_frm->vb.vb2_buf, 0); + ctx->sequence = 0; + + ret = cal_get_external_info(ctx); + if (ret < 0) + goto err; + + cal_runtime_get(ctx->dev); + + enable_irqs(ctx); + camerarx_phy_enable(ctx); + csi2_init(ctx); + csi2_phy_config(ctx); + csi2_lane_config(ctx); + csi2_ctx_config(ctx); + pix_proc_config(ctx); + cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline); + cal_wr_dma_addr(ctx, addr); + csi2_ppi_enable(ctx); + + if (ctx->sensor) { + if (v4l2_subdev_call(ctx->sensor, video, s_stream, 1)) { + ctx_err(ctx, "stream on failed in subdev\n"); + cal_runtime_put(ctx->dev); + ret = -EINVAL; + goto err; + } + } + + if (debug >= 4) + cal_quickdump_regs(ctx->dev); + + return 0; + +err: + list_for_each_entry_safe(buf, tmp, &dma_q->active, list) { + list_del(&buf->list); + vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); + } + return ret; +} + +static void cal_stop_streaming(struct vb2_queue *vq) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vq); + struct cal_dmaqueue *dma_q = &ctx->vidq; + struct cal_buffer *buf, *tmp; + unsigned long flags; + + if (ctx->sensor) { + if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0)) + ctx_err(ctx, "stream off failed in subdev\n"); + } + + csi2_ppi_disable(ctx); + disable_irqs(ctx); + + /* Release all active buffers */ + spin_lock_irqsave(&ctx->slock, flags); + list_for_each_entry_safe(buf, tmp, &dma_q->active, list) { + list_del(&buf->list); + vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); + } + + if (ctx->cur_frm == ctx->next_frm) { + vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR); + } else { + vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR); + vb2_buffer_done(&ctx->next_frm->vb.vb2_buf, + VB2_BUF_STATE_ERROR); + } + ctx->cur_frm = NULL; + ctx->next_frm = NULL; + spin_unlock_irqrestore(&ctx->slock, flags); + + cal_runtime_put(ctx->dev); +} + +static struct vb2_ops cal_video_qops = { + .queue_setup = cal_queue_setup, + .buf_prepare = cal_buffer_prepare, + .buf_queue = cal_buffer_queue, + .start_streaming = cal_start_streaming, + .stop_streaming = cal_stop_streaming, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, +}; + +static const struct v4l2_file_operations cal_fops = { + .owner = THIS_MODULE, + .open = v4l2_fh_open, + .release = vb2_fop_release, + .read = vb2_fop_read, + .poll = vb2_fop_poll, + .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */ + .mmap = vb2_fop_mmap, +}; + +static const struct v4l2_ioctl_ops cal_ioctl_ops = { + .vidioc_querycap = cal_querycap, + .vidioc_enum_fmt_vid_cap = cal_enum_fmt_vid_cap, + .vidioc_g_fmt_vid_cap = cal_g_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = cal_try_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = cal_s_fmt_vid_cap, + .vidioc_enum_framesizes = cal_enum_framesizes, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_enum_input = cal_enum_input, + .vidioc_g_input = cal_g_input, + .vidioc_s_input = cal_s_input, + .vidioc_enum_frameintervals = cal_enum_frameintervals, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + 
.vidioc_log_status = v4l2_ctrl_log_status, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +static struct video_device cal_videodev = { + .name = CAL_MODULE_NAME, + .fops = &cal_fops, + .ioctl_ops = &cal_ioctl_ops, + .minor = -1, + .release = video_device_release_empty, +}; + +/* ----------------------------------------------------------------- + * Initialization and module stuff + * ------------------------------------------------------------------ + */ +static int cal_complete_ctx(struct cal_ctx *ctx); + +static int cal_async_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *subdev, + struct v4l2_async_subdev *asd) +{ + struct cal_ctx *ctx = notifier_to_ctx(notifier); + struct v4l2_subdev_mbus_code_enum mbus_code; + int ret = 0; + int i, j, k; + + if (ctx->sensor) { + ctx_info(ctx, "Rejecting subdev %s (Already set!!)", + subdev->name); + return 0; + } + + ctx->sensor = subdev; + ctx_dbg(1, ctx, "Using sensor %s for capture\n", subdev->name); + + /* Enumerate sub device formats and enable all matching local formats */ + ctx->num_active_fmt = 0; + for (j = 0, i = 0; ret != -EINVAL; ++j) { + struct cal_fmt *fmt; + + memset(&mbus_code, 0, sizeof(mbus_code)); + mbus_code.index = j; + ret = v4l2_subdev_call(subdev, pad, enum_mbus_code, + NULL, &mbus_code); + if (ret) + continue; + + ctx_dbg(2, ctx, + "subdev %s: code: %04x idx: %d\n", + subdev->name, mbus_code.code, j); + + for (k = 0; k < ARRAY_SIZE(cal_formats); k++) { + fmt = &cal_formats[k]; + + if (mbus_code.code == fmt->code) { + ctx->active_fmt[i] = fmt; + ctx_dbg(2, ctx, + "matched fourcc: %s: code: %04x idx: %d\n", + fourcc_to_str(fmt->fourcc), + fmt->code, i); + ctx->num_active_fmt = ++i; + } + } + } + + if (i == 0) { + ctx_err(ctx, "No suitable format reported by subdev %s\n", + subdev->name); + return -EINVAL; + } + + cal_complete_ctx(ctx); + + return 0; +} + +static int cal_async_complete(struct v4l2_async_notifier *notifier) +{ + struct cal_ctx *ctx = notifier_to_ctx(notifier); + const struct cal_fmt *fmt; + struct v4l2_mbus_framefmt mbus_fmt; + int ret; + + ret = __subdev_get_format(ctx, &mbus_fmt); + if (ret) + return ret; + + fmt = find_format_by_code(ctx, mbus_fmt.code); + if (!fmt) { + ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n", + mbus_fmt.code); + return -EINVAL; + } + + /* Save current subdev format */ + v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt); + ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc; + cal_calc_format_size(ctx, fmt, &ctx->v_fmt); + ctx->fmt = fmt; + ctx->m_fmt = mbus_fmt; + + return 0; +} + +static int cal_complete_ctx(struct cal_ctx *ctx) +{ + struct video_device *vfd; + struct vb2_queue *q; + int ret; + + ctx->timeperframe = tpf_default; + ctx->external_rate = 192000000; + + /* initialize locks */ + spin_lock_init(&ctx->slock); + mutex_init(&ctx->mutex); + + /* initialize queue */ + q = &ctx->vb_vidq; + q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; + q->drv_priv = ctx; + q->buf_struct_size = sizeof(struct cal_buffer); + q->ops = &cal_video_qops; + q->mem_ops = &vb2_dma_contig_memops; + q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + q->lock = &ctx->mutex; + q->min_buffers_needed = 3; + + ret = vb2_queue_init(q); + if (ret) + return ret; + + /* init video dma queues */ + INIT_LIST_HEAD(&ctx->vidq.active); + + vfd = &ctx->vdev; + *vfd = cal_videodev; + vfd->v4l2_dev = &ctx->v4l2_dev; + vfd->queue = q; + + 
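	/*
	 * Pointing vfd->queue at this vb2 queue is what lets the vb2_ioctl_*
	 * and vb2_fop_* helpers wired up in cal_ioctl_ops and cal_fops
	 * operate on the queue directly.
	 */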
/* + * Provide a mutex to v4l2 core. It will be used to protect + * all fops and v4l2 ioctls. + */ + vfd->lock = &ctx->mutex; + video_set_drvdata(vfd, ctx); + + ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr); + if (ret < 0) + return ret; + + v4l2_info(&ctx->v4l2_dev, "V4L2 device registered as %s\n", + video_device_node_name(vfd)); + + ctx->alloc_ctx = vb2_dma_contig_init_ctx(vfd->v4l2_dev->dev); + if (IS_ERR(ctx->alloc_ctx)) { + ctx_err(ctx, "Failed to alloc vb2 context\n"); + ret = PTR_ERR(ctx->alloc_ctx); + goto vdev_unreg; + } + + return 0; + +vdev_unreg: + video_unregister_device(vfd); + return ret; +} + +static struct device_node * +of_get_next_port(const struct device_node *parent, + struct device_node *prev) +{ + struct device_node *port = NULL; + + if (!parent) + return NULL; + + if (!prev) { + struct device_node *ports; + /* + * It's the first call, we have to find a port subnode + * within this node or within an optional 'ports' node. + */ + ports = of_get_child_by_name(parent, "ports"); + if (ports) + parent = ports; + + port = of_get_child_by_name(parent, "port"); + + /* release the 'ports' node */ + of_node_put(ports); + } else { + struct device_node *ports; + + ports = of_get_parent(prev); + if (!ports) + return NULL; + + do { + port = of_get_next_child(ports, prev); + if (!port) { + of_node_put(ports); + return NULL; + } + prev = port; + } while (of_node_cmp(port->name, "port") != 0); + } + + return port; +} + +static struct device_node * +of_get_next_endpoint(const struct device_node *parent, + struct device_node *prev) +{ + struct device_node *ep = NULL; + + if (!parent) + return NULL; + + do { + ep = of_get_next_child(parent, prev); + if (!ep) + return NULL; + prev = ep; + } while (of_node_cmp(ep->name, "endpoint") != 0); + + return ep; +} + +static int of_cal_create_instance(struct cal_ctx *ctx, int inst) +{ + struct platform_device *pdev = ctx->dev->pdev; + struct device_node *ep_node, *port, *remote_ep, + *sensor_node, *parent; + struct v4l2_of_endpoint *endpoint; + struct v4l2_async_subdev *asd; + u32 regval = 0; + int ret, index, found_port = 0, lane; + + parent = pdev->dev.of_node; + + asd = &ctx->asd; + endpoint = &ctx->endpoint; + + ep_node = NULL; + port = NULL; + remote_ep = NULL; + sensor_node = NULL; + ret = -EINVAL; + + ctx_dbg(3, ctx, "Scanning Port node for csi2 port: %d\n", inst); + for (index = 0; index < CAL_NUM_CSI2_PORTS; index++) { + port = of_get_next_port(parent, port); + if (!port) { + ctx_dbg(1, ctx, "No port node found for csi2 port:%d\n", + index); + goto cleanup_exit; + } + + /* Match the slice number with <REG> */ + of_property_read_u32(port, "reg", ®val); + ctx_dbg(3, ctx, "port:%d inst:%d <reg>:%d\n", + index, inst, regval); + if ((regval == inst) && (index == inst)) { + found_port = 1; + break; + } + } + + if (!found_port) { + ctx_dbg(1, ctx, "No port node matches csi2 port:%d\n", + inst); + goto cleanup_exit; + } + + ctx_dbg(3, ctx, "Scanning sub-device for csi2 port: %d\n", + inst); + + ep_node = of_get_next_endpoint(port, ep_node); + if (!ep_node) { + ctx_dbg(3, ctx, "can't get next endpoint\n"); + goto cleanup_exit; + } + + sensor_node = of_graph_get_remote_port_parent(ep_node); + if (!sensor_node) { + ctx_dbg(3, ctx, "can't get remote parent\n"); + goto cleanup_exit; + } + asd->match_type = V4L2_ASYNC_MATCH_OF; + asd->match.of.node = sensor_node; + + remote_ep = of_parse_phandle(ep_node, "remote-endpoint", 0); + if (!remote_ep) { + ctx_dbg(3, ctx, "can't get remote-endpoint\n"); + goto cleanup_exit; + } + 
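	/*
	 * The CSI-2 bus configuration (clock/data lane layout, bus flags)
	 * and the virtual channel number are described on the remote,
	 * sensor-side endpoint, so that is the node parsed below.
	 */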
v4l2_of_parse_endpoint(remote_ep, endpoint); + + if (endpoint->bus_type != V4L2_MBUS_CSI2) { + ctx_err(ctx, "Port:%d sub-device %s is not a CSI2 device\n", + inst, sensor_node->name); + goto cleanup_exit; + } + + /* Store Virtual Channel number */ + ctx->virtual_channel = endpoint->base.id; + + ctx_dbg(3, ctx, "Port:%d v4l2-endpoint: CSI2\n", inst); + ctx_dbg(3, ctx, "Virtual Channel=%d\n", ctx->virtual_channel); + ctx_dbg(3, ctx, "flags=0x%08x\n", endpoint->bus.mipi_csi2.flags); + ctx_dbg(3, ctx, "clock_lane=%d\n", endpoint->bus.mipi_csi2.clock_lane); + ctx_dbg(3, ctx, "num_data_lanes=%d\n", + endpoint->bus.mipi_csi2.num_data_lanes); + ctx_dbg(3, ctx, "data_lanes= <\n"); + for (lane = 0; lane < endpoint->bus.mipi_csi2.num_data_lanes; lane++) + ctx_dbg(3, ctx, "\t%d\n", + endpoint->bus.mipi_csi2.data_lanes[lane]); + ctx_dbg(3, ctx, "\t>\n"); + + ctx_dbg(1, ctx, "Port: %d found sub-device %s\n", + inst, sensor_node->name); + + ctx->asd_list[0] = asd; + ctx->notifier.subdevs = ctx->asd_list; + ctx->notifier.num_subdevs = 1; + ctx->notifier.bound = cal_async_bound; + ctx->notifier.complete = cal_async_complete; + ret = v4l2_async_notifier_register(&ctx->v4l2_dev, + &ctx->notifier); + if (ret) { + ctx_err(ctx, "Error registering async notifier\n"); + ret = -EINVAL; + } + +cleanup_exit: + if (!remote_ep) + of_node_put(remote_ep); + if (!sensor_node) + of_node_put(sensor_node); + if (!ep_node) + of_node_put(ep_node); + if (!port) + of_node_put(port); + + return ret; +} + +static struct cal_ctx *cal_create_instance(struct cal_dev *dev, int inst) +{ + struct cal_ctx *ctx; + struct v4l2_ctrl_handler *hdl; + int ret; + + ctx = devm_kzalloc(&dev->pdev->dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + + /* save the cal_dev * for future ref */ + ctx->dev = dev; + + snprintf(ctx->v4l2_dev.name, sizeof(ctx->v4l2_dev.name), + "%s-%03d", CAL_MODULE_NAME, inst); + ret = v4l2_device_register(&dev->pdev->dev, &ctx->v4l2_dev); + if (ret) + goto err_exit; + + hdl = &ctx->ctrl_handler; + ret = v4l2_ctrl_handler_init(hdl, 11); + if (ret) { + ctx_err(ctx, "Failed to init ctrl handler\n"); + goto unreg_dev; + } + ctx->v4l2_dev.ctrl_handler = hdl; + + /* Make sure Camera Core H/W register area is available */ + ctx->cc = dev->cc[inst]; + + /* Store the instance id */ + ctx->csi2_port = inst + 1; + + ret = of_cal_create_instance(ctx, inst); + if (ret) { + ret = -EINVAL; + goto free_hdl; + } + return ctx; + +free_hdl: + v4l2_ctrl_handler_free(hdl); +unreg_dev: + v4l2_device_unregister(&ctx->v4l2_dev); +err_exit: + return NULL; +} + +static int cal_probe(struct platform_device *pdev) +{ + struct cal_dev *dev; + int ret; + int irq; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + /* set pseudo v4l2 device name so we can use v4l2_printk */ + strlcpy(dev->v4l2_dev.name, CAL_MODULE_NAME, + sizeof(dev->v4l2_dev.name)); + + /* save pdev pointer */ + dev->pdev = pdev; + + dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "cal_top"); + dev->base = devm_ioremap_resource(&pdev->dev, dev->res); + if (IS_ERR(dev->base)) + return PTR_ERR(dev->base); + + cal_dbg(1, dev, "ioresource %s at %pa - %pa\n", + dev->res->name, &dev->res->start, &dev->res->end); + + irq = platform_get_irq(pdev, 0); + cal_dbg(1, dev, "got irq# %d\n", irq); + ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME, + dev); + if (ret) + return ret; + + platform_set_drvdata(pdev, dev); + + dev->cm = cm_create(dev); + if (IS_ERR(dev->cm)) + return PTR_ERR(dev->cm); + + dev->cc[0] 
= cc_create(dev, 0); + if (IS_ERR(dev->cc[0])) + return PTR_ERR(dev->cc[0]); + + dev->cc[1] = cc_create(dev, 1); + if (IS_ERR(dev->cc[1])) + return PTR_ERR(dev->cc[1]); + + dev->ctx[0] = NULL; + dev->ctx[1] = NULL; + + dev->ctx[0] = cal_create_instance(dev, 0); + dev->ctx[1] = cal_create_instance(dev, 1); + if (!dev->ctx[0] && !dev->ctx[1]) { + cal_err(dev, "Neither port is configured, no point in staying up\n"); + return -ENODEV; + } + + pm_runtime_enable(&pdev->dev); + + ret = cal_runtime_get(dev); + if (ret) + goto runtime_disable; + + /* Just check we can actually access the module */ + cal_get_hwinfo(dev); + + cal_runtime_put(dev); + + return 0; + +runtime_disable: + pm_runtime_disable(&pdev->dev); + return ret; +} + +static int cal_remove(struct platform_device *pdev) +{ + struct cal_dev *dev = + (struct cal_dev *)platform_get_drvdata(pdev); + struct cal_ctx *ctx; + int i; + + cal_dbg(1, dev, "Removing %s\n", CAL_MODULE_NAME); + + cal_runtime_get(dev); + + for (i = 0; i < CAL_NUM_CONTEXT; i++) { + ctx = dev->ctx[i]; + if (ctx) { + ctx_dbg(1, ctx, "unregistering %s\n", + video_device_node_name(&ctx->vdev)); + camerarx_phy_disable(ctx); + v4l2_async_notifier_unregister(&ctx->notifier); + vb2_dma_contig_cleanup_ctx(ctx->alloc_ctx); + v4l2_ctrl_handler_free(&ctx->ctrl_handler); + v4l2_device_unregister(&ctx->v4l2_dev); + video_unregister_device(&ctx->vdev); + } + } + + cal_runtime_put(dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +#if defined(CONFIG_OF) +static const struct of_device_id cal_of_match[] = { + { .compatible = "ti,dra72-cal", }, + {}, +}; +MODULE_DEVICE_TABLE(of, cal_of_match); +#endif + +static struct platform_driver cal_pdrv = { + .probe = cal_probe, + .remove = cal_remove, + .driver = { + .name = CAL_MODULE_NAME, + .of_match_table = of_match_ptr(cal_of_match), + }, +}; + +module_platform_driver(cal_pdrv); diff --git a/drivers/media/platform/ti-vpe/cal_regs.h b/drivers/media/platform/ti-vpe/cal_regs.h new file mode 100644 index 000000000000..82b3dcf87128 --- /dev/null +++ b/drivers/media/platform/ti-vpe/cal_regs.h @@ -0,0 +1,479 @@ +/* + * TI CAL camera interface driver + * + * Copyright (c) 2015 Texas Instruments Inc. + * + * Benoit Parrot, <bparrot@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#ifndef __TI_CAL_REGS_H +#define __TI_CAL_REGS_H + +#define CAL_NUM_CSI2_PORTS 2 + +/* CAL register offsets */ + +#define CAL_HL_REVISION 0x0000 +#define CAL_HL_HWINFO 0x0004 +#define CAL_HL_SYSCONFIG 0x0010 +#define CAL_HL_IRQ_EOI 0x001c +#define CAL_HL_IRQSTATUS_RAW(m) (0x20U + ((m-1) * 0x10U)) +#define CAL_HL_IRQSTATUS(m) (0x24U + ((m-1) * 0x10U)) +#define CAL_HL_IRQENABLE_SET(m) (0x28U + ((m-1) * 0x10U)) +#define CAL_HL_IRQENABLE_CLR(m) (0x2cU + ((m-1) * 0x10U)) +#define CAL_PIX_PROC(m) (0xc0U + ((m-1) * 0x4U)) +#define CAL_CTRL 0x100 +#define CAL_CTRL1 0x104 +#define CAL_LINE_NUMBER_EVT 0x108 +#define CAL_VPORT_CTRL1 0x120 +#define CAL_VPORT_CTRL2 0x124 +#define CAL_BYS_CTRL1 0x130 +#define CAL_BYS_CTRL2 0x134 +#define CAL_RD_DMA_CTRL 0x140 +#define CAL_RD_DMA_PIX_ADDR 0x144 +#define CAL_RD_DMA_PIX_OFST 0x148 +#define CAL_RD_DMA_XSIZE 0x14c +#define CAL_RD_DMA_YSIZE 0x150 +#define CAL_RD_DMA_INIT_ADDR 0x154 +#define CAL_RD_DMA_INIT_OFST 0x168 +#define CAL_RD_DMA_CTRL2 0x16c +#define CAL_WR_DMA_CTRL(m) (0x200U + ((m-1) * 0x10U)) +#define CAL_WR_DMA_ADDR(m) (0x204U + ((m-1) * 0x10U)) +#define CAL_WR_DMA_OFST(m) (0x208U + ((m-1) * 0x10U)) +#define CAL_WR_DMA_XSIZE(m) (0x20cU + ((m-1) * 0x10U)) +#define CAL_CSI2_PPI_CTRL(m) (0x300U + ((m-1) * 0x80U)) +#define CAL_CSI2_COMPLEXIO_CFG(m) (0x304U + ((m-1) * 0x80U)) +#define CAL_CSI2_COMPLEXIO_IRQSTATUS(m) (0x308U + ((m-1) * 0x80U)) +#define CAL_CSI2_SHORT_PACKET(m) (0x30cU + ((m-1) * 0x80U)) +#define CAL_CSI2_COMPLEXIO_IRQENABLE(m) (0x310U + ((m-1) * 0x80U)) +#define CAL_CSI2_TIMING(m) (0x314U + ((m-1) * 0x80U)) +#define CAL_CSI2_VC_IRQENABLE(m) (0x318U + ((m-1) * 0x80U)) +#define CAL_CSI2_VC_IRQSTATUS(m) (0x328U + ((m-1) * 0x80U)) +#define CAL_CSI2_CTX0(m) (0x330U + ((m-1) * 0x80U)) +#define CAL_CSI2_CTX1(m) (0x334U + ((m-1) * 0x80U)) +#define CAL_CSI2_CTX2(m) (0x338U + ((m-1) * 0x80U)) +#define CAL_CSI2_CTX3(m) (0x33cU + ((m-1) * 0x80U)) +#define CAL_CSI2_CTX4(m) (0x340U + ((m-1) * 0x80U)) +#define CAL_CSI2_CTX5(m) (0x344U + ((m-1) * 0x80U)) +#define CAL_CSI2_CTX6(m) (0x348U + ((m-1) * 0x80U)) +#define CAL_CSI2_CTX7(m) (0x34cU + ((m-1) * 0x80U)) +#define CAL_CSI2_STATUS0(m) (0x350U + ((m-1) * 0x80U)) +#define CAL_CSI2_STATUS1(m) (0x354U + ((m-1) * 0x80U)) +#define CAL_CSI2_STATUS2(m) (0x358U + ((m-1) * 0x80U)) +#define CAL_CSI2_STATUS3(m) (0x35cU + ((m-1) * 0x80U)) +#define CAL_CSI2_STATUS4(m) (0x360U + ((m-1) * 0x80U)) +#define CAL_CSI2_STATUS5(m) (0x364U + ((m-1) * 0x80U)) +#define CAL_CSI2_STATUS6(m) (0x368U + ((m-1) * 0x80U)) +#define CAL_CSI2_STATUS7(m) (0x36cU + ((m-1) * 0x80U)) + +/* CAL CSI2 PHY register offsets */ +#define CAL_CSI2_PHY_REG0 0x000 +#define CAL_CSI2_PHY_REG1 0x004 +#define CAL_CSI2_PHY_REG2 0x008 + +/* CAL Control Module Core Camerrx Control register offsets */ +#define CM_CTRL_CORE_CAMERRX_CONTROL 0x000 + +/********************************************************************* +* Generic value used in various field below +*********************************************************************/ + +#define CAL_GEN_DISABLE 0 +#define CAL_GEN_ENABLE 1 +#define CAL_GEN_FALSE 0 +#define CAL_GEN_TRUE 1 + +/********************************************************************* +* Field Definition Macros +*********************************************************************/ + +#define CAL_HL_REVISION_MINOR_MASK GENMASK(5, 0) +#define CAL_HL_REVISION_CUSTOM_MASK GENMASK(7, 6) +#define CAL_HL_REVISION_MAJOR_MASK GENMASK(10, 8) +#define CAL_HL_REVISION_RTL_MASK GENMASK(15, 11) +#define CAL_HL_REVISION_FUNC_MASK GENMASK(27, 
16) +#define CAL_HL_REVISION_SCHEME_MASK GENMASK(31, 30) +#define CAL_HL_REVISION_SCHEME_H08 1 +#define CAL_HL_REVISION_SCHEME_LEGACY 0 + +#define CAL_HL_HWINFO_WFIFO_MASK GENMASK(3, 0) +#define CAL_HL_HWINFO_RFIFO_MASK GENMASK(7, 4) +#define CAL_HL_HWINFO_PCTX_MASK GENMASK(12, 8) +#define CAL_HL_HWINFO_WCTX_MASK GENMASK(18, 13) +#define CAL_HL_HWINFO_VFIFO_MASK GENMASK(22, 19) +#define CAL_HL_HWINFO_NCPORT_MASK GENMASK(27, 23) +#define CAL_HL_HWINFO_NPPI_CTXS0_MASK GENMASK(29, 28) +#define CAL_HL_HWINFO_NPPI_CTXS1_MASK GENMASK(31, 30) +#define CAL_HL_HWINFO_NPPI_CONTEXTS_ZERO 0 +#define CAL_HL_HWINFO_NPPI_CONTEXTS_FOUR 1 +#define CAL_HL_HWINFO_NPPI_CONTEXTS_EIGHT 2 +#define CAL_HL_HWINFO_NPPI_CONTEXTS_RESERVED 3 + +#define CAL_HL_SYSCONFIG_SOFTRESET_MASK BIT_MASK(0) +#define CAL_HL_SYSCONFIG_SOFTRESET_DONE 0x0 +#define CAL_HL_SYSCONFIG_SOFTRESET_PENDING 0x1 +#define CAL_HL_SYSCONFIG_SOFTRESET_NOACTION 0x0 +#define CAL_HL_SYSCONFIG_SOFTRESET_RESET 0x1 +#define CAL_HL_SYSCONFIG_IDLE_MASK GENMASK(3, 2) +#define CAL_HL_SYSCONFIG_IDLEMODE_FORCE 0 +#define CAL_HL_SYSCONFIG_IDLEMODE_NO 1 +#define CAL_HL_SYSCONFIG_IDLEMODE_SMART1 2 +#define CAL_HL_SYSCONFIG_IDLEMODE_SMART2 3 + +#define CAL_HL_IRQ_EOI_LINE_NUMBER_MASK BIT_MASK(0) +#define CAL_HL_IRQ_EOI_LINE_NUMBER_READ0 0 +#define CAL_HL_IRQ_EOI_LINE_NUMBER_EOI0 0 + +#define CAL_HL_IRQ_MASK(m) BIT_MASK(m-1) +#define CAL_HL_IRQ_NOACTION 0x0 +#define CAL_HL_IRQ_ENABLE 0x1 +#define CAL_HL_IRQ_CLEAR 0x1 +#define CAL_HL_IRQ_DISABLED 0x0 +#define CAL_HL_IRQ_ENABLED 0x1 +#define CAL_HL_IRQ_PENDING 0x1 + +#define CAL_PIX_PROC_EN_MASK BIT_MASK(0) +#define CAL_PIX_PROC_EXTRACT_MASK GENMASK(4, 1) +#define CAL_PIX_PROC_EXTRACT_B6 0x0 +#define CAL_PIX_PROC_EXTRACT_B7 0x1 +#define CAL_PIX_PROC_EXTRACT_B8 0x2 +#define CAL_PIX_PROC_EXTRACT_B10 0x3 +#define CAL_PIX_PROC_EXTRACT_B10_MIPI 0x4 +#define CAL_PIX_PROC_EXTRACT_B12 0x5 +#define CAL_PIX_PROC_EXTRACT_B12_MIPI 0x6 +#define CAL_PIX_PROC_EXTRACT_B14 0x7 +#define CAL_PIX_PROC_EXTRACT_B14_MIPI 0x8 +#define CAL_PIX_PROC_EXTRACT_B16_BE 0x9 +#define CAL_PIX_PROC_EXTRACT_B16_LE 0xa +#define CAL_PIX_PROC_DPCMD_MASK GENMASK(9, 5) +#define CAL_PIX_PROC_DPCMD_BYPASS 0x0 +#define CAL_PIX_PROC_DPCMD_DPCM_10_8_1 0x2 +#define CAL_PIX_PROC_DPCMD_DPCM_12_8_1 0x8 +#define CAL_PIX_PROC_DPCMD_DPCM_10_7_1 0x4 +#define CAL_PIX_PROC_DPCMD_DPCM_10_7_2 0x5 +#define CAL_PIX_PROC_DPCMD_DPCM_10_6_1 0x6 +#define CAL_PIX_PROC_DPCMD_DPCM_10_6_2 0x7 +#define CAL_PIX_PROC_DPCMD_DPCM_12_7_1 0xa +#define CAL_PIX_PROC_DPCMD_DPCM_12_6_1 0xc +#define CAL_PIX_PROC_DPCMD_DPCM_14_10 0xe +#define CAL_PIX_PROC_DPCMD_DPCM_14_8_1 0x10 +#define CAL_PIX_PROC_DPCMD_DPCM_16_12_1 0x12 +#define CAL_PIX_PROC_DPCMD_DPCM_16_10_1 0x14 +#define CAL_PIX_PROC_DPCMD_DPCM_16_8_1 0x16 +#define CAL_PIX_PROC_DPCME_MASK GENMASK(15, 11) +#define CAL_PIX_PROC_DPCME_BYPASS 0x0 +#define CAL_PIX_PROC_DPCME_DPCM_10_8_1 0x2 +#define CAL_PIX_PROC_DPCME_DPCM_12_8_1 0x8 +#define CAL_PIX_PROC_DPCME_DPCM_14_10 0xe +#define CAL_PIX_PROC_DPCME_DPCM_14_8_1 0x10 +#define CAL_PIX_PROC_DPCME_DPCM_16_12_1 0x12 +#define CAL_PIX_PROC_DPCME_DPCM_16_10_1 0x14 +#define CAL_PIX_PROC_DPCME_DPCM_16_8_1 0x16 +#define CAL_PIX_PROC_PACK_MASK GENMASK(18, 16) +#define CAL_PIX_PROC_PACK_B8 0x0 +#define CAL_PIX_PROC_PACK_B10_MIPI 0x2 +#define CAL_PIX_PROC_PACK_B12 0x3 +#define CAL_PIX_PROC_PACK_B12_MIPI 0x4 +#define CAL_PIX_PROC_PACK_B16 0x5 +#define CAL_PIX_PROC_PACK_ARGB 0x6 +#define CAL_PIX_PROC_CPORT_MASK GENMASK(23, 19) + +#define CAL_CTRL_POSTED_WRITES_MASK BIT_MASK(0) +#define 
CAL_CTRL_POSTED_WRITES_NONPOSTED 0 +#define CAL_CTRL_POSTED_WRITES 1 +#define CAL_CTRL_TAGCNT_MASK GENMASK(4, 1) +#define CAL_CTRL_BURSTSIZE_MASK GENMASK(6, 5) +#define CAL_CTRL_BURSTSIZE_BURST16 0x0 +#define CAL_CTRL_BURSTSIZE_BURST32 0x1 +#define CAL_CTRL_BURSTSIZE_BURST64 0x2 +#define CAL_CTRL_BURSTSIZE_BURST128 0x3 +#define CAL_CTRL_LL_FORCE_STATE_MASK GENMASK(12, 7) +#define CAL_CTRL_MFLAGL_MASK GENMASK(20, 13) +#define CAL_CTRL_PWRSCPCLK_MASK BIT_MASK(21) +#define CAL_CTRL_PWRSCPCLK_AUTO 0 +#define CAL_CTRL_PWRSCPCLK_FORCE 1 +#define CAL_CTRL_RD_DMA_STALL_MASK BIT_MASK(22) +#define CAL_CTRL_MFLAGH_MASK GENMASK(31, 24) + +#define CAL_CTRL1_PPI_GROUPING_MASK GENMASK(1, 0) +#define CAL_CTRL1_PPI_GROUPING_DISABLED 0 +#define CAL_CTRL1_PPI_GROUPING_RESERVED 1 +#define CAL_CTRL1_PPI_GROUPING_0 2 +#define CAL_CTRL1_PPI_GROUPING_1 3 +#define CAL_CTRL1_INTERLEAVE01_MASK GENMASK(3, 2) +#define CAL_CTRL1_INTERLEAVE01_DISABLED 0 +#define CAL_CTRL1_INTERLEAVE01_PIX1 1 +#define CAL_CTRL1_INTERLEAVE01_PIX4 2 +#define CAL_CTRL1_INTERLEAVE01_RESERVED 3 +#define CAL_CTRL1_INTERLEAVE23_MASK GENMASK(5, 4) +#define CAL_CTRL1_INTERLEAVE23_DISABLED 0 +#define CAL_CTRL1_INTERLEAVE23_PIX1 1 +#define CAL_CTRL1_INTERLEAVE23_PIX4 2 +#define CAL_CTRL1_INTERLEAVE23_RESERVED 3 + +#define CAL_LINE_NUMBER_EVT_CPORT_MASK GENMASK(4, 0) +#define CAL_LINE_NUMBER_EVT_MASK GENMASK(29, 16) + +#define CAL_VPORT_CTRL1_PCLK_MASK GENMASK(16, 0) +#define CAL_VPORT_CTRL1_XBLK_MASK GENMASK(24, 17) +#define CAL_VPORT_CTRL1_YBLK_MASK GENMASK(30, 25) +#define CAL_VPORT_CTRL1_WIDTH_MASK BIT_MASK(31) +#define CAL_VPORT_CTRL1_WIDTH_ONE 0 +#define CAL_VPORT_CTRL1_WIDTH_TWO 1 + +#define CAL_VPORT_CTRL2_CPORT_MASK GENMASK(4, 0) +#define CAL_VPORT_CTRL2_FREERUNNING_MASK BIT_MASK(15) +#define CAL_VPORT_CTRL2_FREERUNNING_GATED 0 +#define CAL_VPORT_CTRL2_FREERUNNING_FREE 1 +#define CAL_VPORT_CTRL2_FS_RESETS_MASK BIT_MASK(16) +#define CAL_VPORT_CTRL2_FS_RESETS_NO 0 +#define CAL_VPORT_CTRL2_FS_RESETS_YES 1 +#define CAL_VPORT_CTRL2_FSM_RESET_MASK BIT_MASK(17) +#define CAL_VPORT_CTRL2_FSM_RESET_NOEFFECT 0 +#define CAL_VPORT_CTRL2_FSM_RESET 1 +#define CAL_VPORT_CTRL2_RDY_THR_MASK GENMASK(31, 18) + +#define CAL_BYS_CTRL1_PCLK_MASK GENMASK(16, 0) +#define CAL_BYS_CTRL1_XBLK_MASK GENMASK(24, 17) +#define CAL_BYS_CTRL1_YBLK_MASK GENMASK(30, 25) +#define CAL_BYS_CTRL1_BYSINEN_MASK BIT_MASK(31) + +#define CAL_BYS_CTRL2_CPORTIN_MASK GENMASK(4, 0) +#define CAL_BYS_CTRL2_CPORTOUT_MASK GENMASK(9, 5) +#define CAL_BYS_CTRL2_DUPLICATEDDATA_MASK BIT_MASK(10) +#define CAL_BYS_CTRL2_DUPLICATEDDATA_NO 0 +#define CAL_BYS_CTRL2_DUPLICATEDDATA_YES 1 +#define CAL_BYS_CTRL2_FREERUNNING_MASK BIT_MASK(11) +#define CAL_BYS_CTRL2_FREERUNNING_NO 0 +#define CAL_BYS_CTRL2_FREERUNNING_YES 1 + +#define CAL_RD_DMA_CTRL_GO_MASK BIT_MASK(0) +#define CAL_RD_DMA_CTRL_GO_DIS 0 +#define CAL_RD_DMA_CTRL_GO_EN 1 +#define CAL_RD_DMA_CTRL_GO_IDLE 0 +#define CAL_RD_DMA_CTRL_GO_BUSY 1 +#define CAL_RD_DMA_CTRL_INIT_MASK BIT_MASK(1) +#define CAL_RD_DMA_CTRL_BW_LIMITER_MASK GENMASK(10, 2) +#define CAL_RD_DMA_CTRL_OCP_TAG_CNT_MASK GENMASK(14, 11) +#define CAL_RD_DMA_CTRL_PCLK_MASK GENMASK(31, 15) + +#define CAL_RD_DMA_PIX_ADDR_MASK GENMASK(31, 3) + +#define CAL_RD_DMA_PIX_OFST_MASK GENMASK(31, 4) + +#define CAL_RD_DMA_XSIZE_MASK GENMASK(31, 19) + +#define CAL_RD_DMA_YSIZE_MASK GENMASK(29, 16) + +#define CAL_RD_DMA_INIT_ADDR_MASK GENMASK(31, 3) + +#define CAL_RD_DMA_INIT_OFST_MASK GENMASK(31, 3) + +#define CAL_RD_DMA_CTRL2_CIRC_MODE_MASK GENMASK(2, 0) +#define CAL_RD_DMA_CTRL2_CIRC_MODE_DIS 0 
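/*
 * Minimal sketch, not part of this patch: the *_MASK values in this header
 * are contiguous GENMASK()/BIT_MASK() bitfields, and the driver applies them
 * through small helpers that shift a field value by the mask's lowest set
 * bit, along the lines of the set_field() calls seen in csi2_phy_config().
 * The get_field() counterpart here is hypothetical.
 */
#include <linux/bitops.h>
#include <linux/types.h>

static inline void set_field(u32 *valp, u32 field, u32 mask)
{
	u32 val = *valp;

	/* Clear the field, then merge in the new value shifted into place */
	val &= ~mask;
	val |= (field << __ffs(mask)) & mask;
	*valp = val;
}

static inline u32 get_field(u32 val, u32 mask)
{
	/* Extract the field and shift it back down to bit 0 */
	return (val & mask) >> __ffs(mask);
}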
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_ONE 1 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_FOUR 2 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTEEN 3 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTYFOUR 4 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_RESERVED 5 +#define CAL_RD_DMA_CTRL2_ICM_CSTART_MASK BIT_MASK(3) +#define CAL_RD_DMA_CTRL2_PATTERN_MASK GENMASK(5, 4) +#define CAL_RD_DMA_CTRL2_PATTERN_LINEAR 0 +#define CAL_RD_DMA_CTRL2_PATTERN_YUV420 1 +#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP2 2 +#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP4 3 +#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_MASK BIT_MASK(6) +#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_FREERUNNING 0 +#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_WAITFORBYSOUT 1 +#define CAL_RD_DMA_CTRL2_CIRC_SIZE_MASK GENMASK(29, 16) + +#define CAL_WR_DMA_CTRL_MODE_MASK GENMASK(2, 0) +#define CAL_WR_DMA_CTRL_MODE_DIS 0 +#define CAL_WR_DMA_CTRL_MODE_SHD 1 +#define CAL_WR_DMA_CTRL_MODE_CNT 2 +#define CAL_WR_DMA_CTRL_MODE_CNT_INIT 3 +#define CAL_WR_DMA_CTRL_MODE_CONST 4 +#define CAL_WR_DMA_CTRL_MODE_RESERVED 5 +#define CAL_WR_DMA_CTRL_PATTERN_MASK GENMASK(4, 3) +#define CAL_WR_DMA_CTRL_PATTERN_LINEAR 0 +#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP2 2 +#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP4 3 +#define CAL_WR_DMA_CTRL_PATTERN_RESERVED 1 +#define CAL_WR_DMA_CTRL_ICM_PSTART_MASK BIT_MASK(5) +#define CAL_WR_DMA_CTRL_DTAG_MASK GENMASK(8, 6) +#define CAL_WR_DMA_CTRL_DTAG_ATT_HDR 0 +#define CAL_WR_DMA_CTRL_DTAG_ATT_DAT 1 +#define CAL_WR_DMA_CTRL_DTAG 2 +#define CAL_WR_DMA_CTRL_DTAG_PIX_HDR 3 +#define CAL_WR_DMA_CTRL_DTAG_PIX_DAT 4 +#define CAL_WR_DMA_CTRL_DTAG_D5 5 +#define CAL_WR_DMA_CTRL_DTAG_D6 6 +#define CAL_WR_DMA_CTRL_DTAG_D7 7 +#define CAL_WR_DMA_CTRL_CPORT_MASK GENMASK(13, 9) +#define CAL_WR_DMA_CTRL_STALL_RD_MASK BIT_MASK(14) +#define CAL_WR_DMA_CTRL_YSIZE_MASK GENMASK(31, 18) + +#define CAL_WR_DMA_ADDR_MASK GENMASK(31, 4) + +#define CAL_WR_DMA_OFST_MASK GENMASK(18, 4) +#define CAL_WR_DMA_OFST_CIRC_MODE_MASK GENMASK(23, 22) +#define CAL_WR_DMA_OFST_CIRC_MODE_ONE 1 +#define CAL_WR_DMA_OFST_CIRC_MODE_FOUR 2 +#define CAL_WR_DMA_OFST_CIRC_MODE_SIXTYFOUR 3 +#define CAL_WR_DMA_OFST_CIRC_MODE_DISABLED 0 +#define CAL_WR_DMA_OFST_CIRC_SIZE_MASK GENMASK(31, 24) + +#define CAL_WR_DMA_XSIZE_XSKIP_MASK GENMASK(15, 3) +#define CAL_WR_DMA_XSIZE_MASK GENMASK(31, 19) + +#define CAL_CSI2_PPI_CTRL_IF_EN_MASK BIT_MASK(0) +#define CAL_CSI2_PPI_CTRL_ECC_EN_MASK BIT_MASK(2) +#define CAL_CSI2_PPI_CTRL_FRAME_MASK BIT_MASK(3) +#define CAL_CSI2_PPI_CTRL_FRAME_IMMEDIATE 0 +#define CAL_CSI2_PPI_CTRL_FRAME 1 + +#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK GENMASK(2, 0) +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_5 5 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_4 4 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_3 3 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_2 2 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_1 1 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_NOT_USED 0 +#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK BIT_MASK(3) +#define CAL_CSI2_COMPLEXIO_CFG_POL_PLUSMINUS 0 +#define CAL_CSI2_COMPLEXIO_CFG_POL_MINUSPLUS 1 +#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POSITION_MASK GENMASK(6, 4) +#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POL_MASK BIT_MASK(7) +#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POSITION_MASK GENMASK(10, 8) +#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POL_MASK BIT_MASK(11) +#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POSITION_MASK GENMASK(14, 12) +#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POL_MASK BIT_MASK(15) +#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POSITION_MASK GENMASK(18, 16) +#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POL_MASK BIT_MASK(19) +#define 
CAL_CSI2_COMPLEXIO_CFG_PWR_AUTO_MASK BIT_MASK(24) +#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK GENMASK(26, 25) +#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_OFF 0 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON 1 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ULP 2 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK GENMASK(28, 27) +#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF 0 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON 1 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ULP 2 +#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK BIT_MASK(29) +#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED 1 +#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETONGOING 0 +#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK BIT_MASK(30) +#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL 0 +#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL 1 + +#define CAL_CSI2_SHORT_PACKET_MASK GENMASK(23, 0) + +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS1_MASK BIT_MASK(0) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS2_MASK BIT_MASK(1) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS3_MASK BIT_MASK(2) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS4_MASK BIT_MASK(3) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS5_MASK BIT_MASK(4) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1_MASK BIT_MASK(5) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2_MASK BIT_MASK(6) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3_MASK BIT_MASK(7) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4_MASK BIT_MASK(8) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5_MASK BIT_MASK(9) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC1_MASK BIT_MASK(10) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC2_MASK BIT_MASK(11) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC3_MASK BIT_MASK(12) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC4_MASK BIT_MASK(13) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC5_MASK BIT_MASK(14) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL1_MASK BIT_MASK(15) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL2_MASK BIT_MASK(16) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL3_MASK BIT_MASK(17) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL4_MASK BIT_MASK(18) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL5_MASK BIT_MASK(19) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM1_MASK BIT_MASK(20) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM2_MASK BIT_MASK(21) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM3_MASK BIT_MASK(22) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM4_MASK BIT_MASK(23) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM5_MASK BIT_MASK(24) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER_MASK BIT_MASK(25) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT_MASK BIT_MASK(26) +#define CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK BIT_MASK(27) +#define CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK BIT_MASK(28) +#define CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK BIT_MASK(30) + +#define CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK GENMASK(12, 0) +#define CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK BIT_MASK(13) +#define CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK BIT_MASK(14) +#define CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK BIT_MASK(15) + +#define CAL_CSI2_VC_IRQ_FS_IRQ_0_MASK BIT_MASK(0) +#define CAL_CSI2_VC_IRQ_FE_IRQ_0_MASK BIT_MASK(1) +#define CAL_CSI2_VC_IRQ_LS_IRQ_0_MASK BIT_MASK(2) +#define CAL_CSI2_VC_IRQ_LE_IRQ_0_MASK BIT_MASK(3) +#define CAL_CSI2_VC_IRQ_CS_IRQ_0_MASK BIT_MASK(4) +#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_0_MASK BIT_MASK(5) +#define CAL_CSI2_VC_IRQ_FS_IRQ_1_MASK BIT_MASK(8) +#define CAL_CSI2_VC_IRQ_FE_IRQ_1_MASK BIT_MASK(9) +#define CAL_CSI2_VC_IRQ_LS_IRQ_1_MASK BIT_MASK(10) +#define CAL_CSI2_VC_IRQ_LE_IRQ_1_MASK BIT_MASK(11) +#define CAL_CSI2_VC_IRQ_CS_IRQ_1_MASK 
BIT_MASK(12) +#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_1_MASK BIT_MASK(13) +#define CAL_CSI2_VC_IRQ_FS_IRQ_2_MASK BIT_MASK(16) +#define CAL_CSI2_VC_IRQ_FE_IRQ_2_MASK BIT_MASK(17) +#define CAL_CSI2_VC_IRQ_LS_IRQ_2_MASK BIT_MASK(18) +#define CAL_CSI2_VC_IRQ_LE_IRQ_2_MASK BIT_MASK(19) +#define CAL_CSI2_VC_IRQ_CS_IRQ_2_MASK BIT_MASK(20) +#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_2_MASK BIT_MASK(21) +#define CAL_CSI2_VC_IRQ_FS_IRQ_3_MASK BIT_MASK(24) +#define CAL_CSI2_VC_IRQ_FE_IRQ_3_MASK BIT_MASK(25) +#define CAL_CSI2_VC_IRQ_LS_IRQ_3_MASK BIT_MASK(26) +#define CAL_CSI2_VC_IRQ_LE_IRQ_3_MASK BIT_MASK(27) +#define CAL_CSI2_VC_IRQ_CS_IRQ_3_MASK BIT_MASK(28) +#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_3_MASK BIT_MASK(29) + +#define CAL_CSI2_CTX_DT_MASK GENMASK(5, 0) +#define CAL_CSI2_CTX_VC_MASK GENMASK(7, 6) +#define CAL_CSI2_CTX_CPORT_MASK GENMASK(12, 8) +#define CAL_CSI2_CTX_ATT_MASK BIT_MASK(13) +#define CAL_CSI2_CTX_ATT_PIX 0 +#define CAL_CSI2_CTX_ATT 1 +#define CAL_CSI2_CTX_PACK_MODE_MASK BIT_MASK(14) +#define CAL_CSI2_CTX_PACK_MODE_LINE 0 +#define CAL_CSI2_CTX_PACK_MODE_FRAME 1 +#define CAL_CSI2_CTX_LINES_MASK GENMASK(29, 16) + +#define CAL_CSI2_STATUS_FRAME_MASK GENMASK(15, 0) + +#define CAL_CSI2_PHY_REG0_THS_SETTLE_MASK GENMASK(7, 0) +#define CAL_CSI2_PHY_REG0_THS_TERM_MASK GENMASK(15, 8) +#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK BIT_MASK(24) +#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE 1 +#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_ENABLE 0 + +#define CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK GENMASK(7, 0) +#define CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK GENMASK(9, 8) +#define CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK GENMASK(17, 10) +#define CAL_CSI2_PHY_REG1_TCLK_TERM_MASK GENMASK(24, 18) +#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_MASK BIT_MASK(25) +#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_ERROR 1 +#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_SUCCESS 0 +#define CAL_CSI2_PHY_REG1_RESET_DONE_STATUS_MASK GENMASK(29, 28) + +#define CAL_CSI2_PHY_REG2_CCP2_SYNC_PATTERN_MASK GENMASK(23, 0) +#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC3_MASK GENMASK(25, 24) +#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC2_MASK GENMASK(27, 26) +#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC1_MASK GENMASK(29, 28) +#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC0_MASK GENMASK(31, 30) + +#define CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK BIT_MASK(0) +#define CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK GENMASK(2, 1) +#define CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK GENMASK(4, 3) +#define CM_CAMERRX_CTRL_CSI1_MODE_MASK BIT_MASK(5) +#define CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK BIT_MASK(10) +#define CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK GENMASK(12, 11) +#define CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK GENMASK(16, 13) +#define CM_CAMERRX_CTRL_CSI0_MODE_MASK BIT_MASK(17) + +#endif diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index 418113c99801..c4b5fab83666 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c @@ -1074,7 +1074,7 @@ static int __init vim2m_init(void) if (ret) platform_device_unregister(&vim2m_pdev); - return 0; + return ret; } module_init(vim2m_init); diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c index e15eef6a94e5..bdc380b14e0c 100644 --- a/drivers/media/platform/vivid/vivid-osd.c +++ b/drivers/media/platform/vivid/vivid-osd.c @@ -360,7 +360,7 @@ void vivid_fb_release_buffers(struct vivid_dev *dev) /* Release pseudo palette */ kfree(dev->fb_info.pseudo_palette); - kfree((void 
*)dev->video_vbase); + kfree(dev->video_vbase); } /* Initialize the specified card */ diff --git a/drivers/media/platform/vivid/vivid-tpg.h b/drivers/media/platform/vivid/vivid-tpg.h index 9baed6a10334..93fbaee69675 100644 --- a/drivers/media/platform/vivid/vivid-tpg.h +++ b/drivers/media/platform/vivid/vivid-tpg.h @@ -418,6 +418,8 @@ static inline void tpg_s_bytesperline(struct tpg_data *tpg, unsigned plane, unsi tpg->bytesperline[p] = plane_w / tpg->hdownsampling[p]; } + if (tpg_g_interleaved(tpg)) + tpg->bytesperline[1] = tpg->bytesperline[0]; } diff --git a/drivers/media/platform/vsp1/Makefile b/drivers/media/platform/vsp1/Makefile index 6a93f928dfde..95b3ac2ea7ef 100644 --- a/drivers/media/platform/vsp1/Makefile +++ b/drivers/media/platform/vsp1/Makefile @@ -1,4 +1,5 @@ -vsp1-y := vsp1_drv.o vsp1_entity.o vsp1_video.o +vsp1-y := vsp1_drv.o vsp1_entity.o vsp1_pipe.o +vsp1-y += vsp1_dl.o vsp1_drm.o vsp1_video.o vsp1-y += vsp1_rpf.o vsp1_rwpf.o vsp1_wpf.o vsp1-y += vsp1_hsit.o vsp1_lif.o vsp1_lut.o vsp1-y += vsp1_bru.o vsp1_sru.o vsp1_uds.o diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h index 989e96f7e360..910d6b8e8b50 100644 --- a/drivers/media/platform/vsp1/vsp1.h +++ b/drivers/media/platform/vsp1/vsp1.h @@ -26,6 +26,9 @@ struct clk; struct device; +struct vsp1_dl; +struct vsp1_drm; +struct vsp1_entity; struct vsp1_platform_data; struct vsp1_bru; struct vsp1_hsit; @@ -42,17 +45,21 @@ struct vsp1_uds; #define VSP1_HAS_LIF (1 << 0) #define VSP1_HAS_LUT (1 << 1) #define VSP1_HAS_SRU (1 << 2) +#define VSP1_HAS_BRU (1 << 3) -struct vsp1_platform_data { +struct vsp1_device_info { + u32 version; unsigned int features; unsigned int rpf_count; unsigned int uds_count; unsigned int wpf_count; + unsigned int num_bru_inputs; + bool uapi; }; struct vsp1_device { struct device *dev; - struct vsp1_platform_data pdata; + const struct vsp1_device_info *info; void __iomem *mmio; struct clk *clock; @@ -71,14 +78,22 @@ struct vsp1_device { struct vsp1_rwpf *wpf[VSP1_MAX_WPF]; struct list_head entities; + struct list_head videos; struct v4l2_device v4l2_dev; struct media_device media_dev; + struct media_entity_operations media_ops; + + struct vsp1_drm *drm; + + bool use_dl; }; int vsp1_device_get(struct vsp1_device *vsp1); void vsp1_device_put(struct vsp1_device *vsp1); +int vsp1_reset_wpf(struct vsp1_device *vsp1, unsigned int index); + static inline u32 vsp1_read(struct vsp1_device *vsp1, u32 reg) { return ioread32(vsp1->mmio + reg); @@ -89,4 +104,14 @@ static inline void vsp1_write(struct vsp1_device *vsp1, u32 reg, u32 data) iowrite32(data, vsp1->mmio + reg); } +#include "vsp1_dl.h" + +static inline void vsp1_mod_write(struct vsp1_entity *e, u32 reg, u32 data) +{ + if (e->vsp1->use_dl) + vsp1_dl_add(e, reg, data); + else + vsp1_write(e->vsp1, reg, data); +} + #endif /* __VSP1_H__ */ diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c index 7dd763311c0f..cb0dbc15ddad 100644 --- a/drivers/media/platform/vsp1/vsp1_bru.c +++ b/drivers/media/platform/vsp1/vsp1_bru.c @@ -19,6 +19,7 @@ #include "vsp1.h" #include "vsp1_bru.h" #include "vsp1_rwpf.h" +#include "vsp1_video.h" #define BRU_MIN_SIZE 1U #define BRU_MAX_SIZE 8190U @@ -27,14 +28,9 @@ * Device Access */ -static inline u32 vsp1_bru_read(struct vsp1_bru *bru, u32 reg) -{ - return vsp1_read(bru->entity.vsp1, reg); -} - static inline void vsp1_bru_write(struct vsp1_bru *bru, u32 reg, u32 data) { - vsp1_write(bru->entity.vsp1, reg, data); + vsp1_mod_write(&bru->entity, reg, data); 
} /* ----------------------------------------------------------------------------- @@ -83,7 +79,7 @@ static int bru_s_stream(struct v4l2_subdev *subdev, int enable) if (!enable) return 0; - format = &bru->entity.formats[BRU_PAD_SOURCE]; + format = &bru->entity.formats[bru->entity.source_pad]; /* The hardware is extremely flexible but we have no userspace API to * expose all the parameters, nor is it clear whether we would have use @@ -94,7 +90,7 @@ static int bru_s_stream(struct v4l2_subdev *subdev, int enable) /* Disable dithering and enable color data normalization unless the * format at the pipeline output is premultiplied. */ - flags = pipe->output ? pipe->output->video.format.flags : 0; + flags = pipe->output ? pipe->output->format.flags : 0; vsp1_bru_write(bru, VI6_BRU_INCTRL, flags & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA ? 0 : VI6_BRU_INCTRL_NRM); @@ -113,7 +109,7 @@ static int bru_s_stream(struct v4l2_subdev *subdev, int enable) VI6_BRU_ROP_CROP(VI6_ROP_NOP) | VI6_BRU_ROP_AROP(VI6_ROP_NOP)); - for (i = 0; i < 4; ++i) { + for (i = 0; i < bru->entity.source_pad; ++i) { bool premultiplied = false; u32 ctrl = 0; @@ -125,7 +121,7 @@ static int bru_s_stream(struct v4l2_subdev *subdev, int enable) if (bru->inputs[i].rpf) { ctrl |= VI6_BRU_CTRL_RBC; - premultiplied = bru->inputs[i].rpf->video.format.flags + premultiplied = bru->inputs[i].rpf->format.flags & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA; } else { ctrl |= VI6_BRU_CTRL_CROP(VI6_ROP_NOP) @@ -295,7 +291,7 @@ static int bru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_con *format = fmt->format; /* Reset the compose rectangle */ - if (fmt->pad != BRU_PAD_SOURCE) { + if (fmt->pad != bru->entity.source_pad) { struct v4l2_rect *compose; compose = bru_get_compose(bru, cfg, fmt->pad, fmt->which); @@ -309,7 +305,7 @@ static int bru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_con if (fmt->pad == BRU_PAD_SINK(0)) { unsigned int i; - for (i = 0; i <= BRU_PAD_SOURCE; ++i) { + for (i = 0; i <= bru->entity.source_pad; ++i) { format = vsp1_entity_get_pad_format(&bru->entity, cfg, i, fmt->which); format->code = fmt->format.code; @@ -325,7 +321,7 @@ static int bru_get_selection(struct v4l2_subdev *subdev, { struct vsp1_bru *bru = to_bru(subdev); - if (sel->pad == BRU_PAD_SOURCE) + if (sel->pad == bru->entity.source_pad) return -EINVAL; switch (sel->target) { @@ -353,7 +349,7 @@ static int bru_set_selection(struct v4l2_subdev *subdev, struct v4l2_mbus_framefmt *format; struct v4l2_rect *compose; - if (sel->pad == BRU_PAD_SOURCE) + if (sel->pad == bru->entity.source_pad) return -EINVAL; if (sel->target != V4L2_SEL_TGT_COMPOSE) @@ -362,8 +358,8 @@ static int bru_set_selection(struct v4l2_subdev *subdev, /* The compose rectangle top left corner must be inside the output * frame. 
*/ - format = vsp1_entity_get_pad_format(&bru->entity, cfg, BRU_PAD_SOURCE, - sel->which); + format = vsp1_entity_get_pad_format(&bru->entity, cfg, + bru->entity.source_pad, sel->which); sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1); sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1); @@ -419,7 +415,8 @@ struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1) bru->entity.type = VSP1_ENTITY_BRU; - ret = vsp1_entity_init(vsp1, &bru->entity, 5); + ret = vsp1_entity_init(vsp1, &bru->entity, + vsp1->info->num_bru_inputs + 1); if (ret < 0) return ERR_PTR(ret); @@ -427,7 +424,7 @@ struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1) subdev = &bru->entity.subdev; v4l2_subdev_init(subdev, &bru_ops); - subdev->entity.ops = &vsp1_media_ops; + subdev->entity.ops = &vsp1->media_ops; subdev->internal_ops = &vsp1_subdev_internal_ops; snprintf(subdev->name, sizeof(subdev->name), "%s bru", dev_name(vsp1->dev)); diff --git a/drivers/media/platform/vsp1/vsp1_bru.h b/drivers/media/platform/vsp1/vsp1_bru.h index 16b1c6554911..dbac9686ea69 100644 --- a/drivers/media/platform/vsp1/vsp1_bru.h +++ b/drivers/media/platform/vsp1/vsp1_bru.h @@ -23,7 +23,6 @@ struct vsp1_device; struct vsp1_rwpf; #define BRU_PAD_SINK(n) (n) -#define BRU_PAD_SOURCE 4 struct vsp1_bru { struct vsp1_entity entity; @@ -33,7 +32,7 @@ struct vsp1_bru { struct { struct vsp1_rwpf *rpf; struct v4l2_rect compose; - } inputs[4]; + } inputs[VSP1_MAX_RPF]; }; static inline struct vsp1_bru *to_bru(struct v4l2_subdev *subdev) diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c new file mode 100644 index 000000000000..7dc27ac6bd02 --- /dev/null +++ b/drivers/media/platform/vsp1/vsp1_dl.c @@ -0,0 +1,305 @@ +/* + * vsp1_dl.h -- R-Car VSP1 Display List + * + * Copyright (C) 2015 Renesas Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/gfp.h> +#include <linux/slab.h> + +#include "vsp1.h" +#include "vsp1_dl.h" +#include "vsp1_pipe.h" + +/* + * Global resources + * + * - Display-related interrupts (can be used for vblank evasion ?) 
+ * - Display-list enable + * - Header-less for WPF0 + * - DL swap + */ + +#define VSP1_DL_BODY_SIZE (2 * 4 * 256) +#define VSP1_DL_NUM_LISTS 3 + +struct vsp1_dl_entry { + u32 addr; + u32 data; +} __attribute__((__packed__)); + +struct vsp1_dl_list { + size_t size; + int reg_count; + + bool in_use; + + struct vsp1_dl_entry *body; + dma_addr_t dma; +}; + +/** + * struct vsp1_dl - Display List manager + * @vsp1: the VSP1 device + * @lock: protects the active, queued and pending lists + * @lists.all: array of all allocate display lists + * @lists.active: list currently being processed (loaded) by hardware + * @lists.queued: list queued to the hardware (written to the DL registers) + * @lists.pending: list waiting to be queued to the hardware + * @lists.write: list being written to by software + */ +struct vsp1_dl { + struct vsp1_device *vsp1; + + spinlock_t lock; + + size_t size; + dma_addr_t dma; + void *mem; + + struct { + struct vsp1_dl_list all[VSP1_DL_NUM_LISTS]; + + struct vsp1_dl_list *active; + struct vsp1_dl_list *queued; + struct vsp1_dl_list *pending; + struct vsp1_dl_list *write; + } lists; +}; + +/* ----------------------------------------------------------------------------- + * Display List Transaction Management + */ + +static void vsp1_dl_free_list(struct vsp1_dl_list *list) +{ + if (!list) + return; + + list->in_use = false; +} + +void vsp1_dl_reset(struct vsp1_dl *dl) +{ + unsigned int i; + + dl->lists.active = NULL; + dl->lists.queued = NULL; + dl->lists.pending = NULL; + dl->lists.write = NULL; + + for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i) + dl->lists.all[i].in_use = false; +} + +void vsp1_dl_begin(struct vsp1_dl *dl) +{ + struct vsp1_dl_list *list = NULL; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&dl->lock, flags); + + for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i) { + if (!dl->lists.all[i].in_use) { + list = &dl->lists.all[i]; + break; + } + } + + if (!list) { + list = dl->lists.pending; + dl->lists.pending = NULL; + } + + spin_unlock_irqrestore(&dl->lock, flags); + + dl->lists.write = list; + + list->in_use = true; + list->reg_count = 0; +} + +void vsp1_dl_add(struct vsp1_entity *e, u32 reg, u32 data) +{ + struct vsp1_pipeline *pipe = to_vsp1_pipeline(&e->subdev.entity); + struct vsp1_dl *dl = pipe->dl; + struct vsp1_dl_list *list = dl->lists.write; + + list->body[list->reg_count].addr = reg; + list->body[list->reg_count].data = data; + list->reg_count++; +} + +void vsp1_dl_commit(struct vsp1_dl *dl) +{ + struct vsp1_device *vsp1 = dl->vsp1; + struct vsp1_dl_list *list; + unsigned long flags; + bool update; + + list = dl->lists.write; + dl->lists.write = NULL; + + spin_lock_irqsave(&dl->lock, flags); + + /* Once the UPD bit has been set the hardware can start processing the + * display list at any time and we can't touch the address and size + * registers. In that case mark the update as pending, it will be + * queued up to the hardware by the frame end interrupt handler. + */ + update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD); + if (update) { + vsp1_dl_free_list(dl->lists.pending); + dl->lists.pending = list; + goto done; + } + + /* Program the hardware with the display list body address and size. + * The UPD bit will be cleared by the device when the display list is + * processed. 
+ */ + vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), list->dma); + vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD | + (list->reg_count * 8)); + + vsp1_dl_free_list(dl->lists.queued); + dl->lists.queued = list; + +done: + spin_unlock_irqrestore(&dl->lock, flags); +} + +/* ----------------------------------------------------------------------------- + * Interrupt Handling + */ + +void vsp1_dl_irq_display_start(struct vsp1_dl *dl) +{ + spin_lock(&dl->lock); + + /* The display start interrupt signals the end of the display list + * processing by the device. The active display list, if any, won't be + * accessed anymore and can be reused. + */ + if (dl->lists.active) { + vsp1_dl_free_list(dl->lists.active); + dl->lists.active = NULL; + } + + spin_unlock(&dl->lock); +} + +void vsp1_dl_irq_frame_end(struct vsp1_dl *dl) +{ + struct vsp1_device *vsp1 = dl->vsp1; + + spin_lock(&dl->lock); + + /* The UPD bit set indicates that the commit operation raced with the + * interrupt and occurred after the frame end event and UPD clear but + * before interrupt processing. The hardware hasn't taken the update + * into account yet, we'll thus skip one frame and retry. + */ + if (vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD) + goto done; + + /* The device starts processing the queued display list right after the + * frame end interrupt. The display list thus becomes active. + */ + if (dl->lists.queued) { + WARN_ON(dl->lists.active); + dl->lists.active = dl->lists.queued; + dl->lists.queued = NULL; + } + + /* Now that the UPD bit has been cleared we can queue the next display + * list to the hardware if one has been prepared. + */ + if (dl->lists.pending) { + struct vsp1_dl_list *list = dl->lists.pending; + + vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), list->dma); + vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD | + (list->reg_count * 8)); + + dl->lists.queued = list; + dl->lists.pending = NULL; + } + +done: + spin_unlock(&dl->lock); +} + +/* ----------------------------------------------------------------------------- + * Hardware Setup + */ + +void vsp1_dl_setup(struct vsp1_device *vsp1) +{ + u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT) + | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0 + | VI6_DL_CTRL_DLE; + + /* The DRM pipeline operates with header-less display lists in + * Continuous Frame Mode. 
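+ * In that mode the hardware reprocesses the display list for every frame
+ * on its own, so the frame end handler only has to swap lists instead of
+ * restarting the pipeline.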
+ */ + if (vsp1->drm) + ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0; + + vsp1_write(vsp1, VI6_DL_CTRL, ctrl); + vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS); +} + +/* ----------------------------------------------------------------------------- + * Initialization and Cleanup + */ + +struct vsp1_dl *vsp1_dl_create(struct vsp1_device *vsp1) +{ + struct vsp1_dl *dl; + unsigned int i; + + dl = kzalloc(sizeof(*dl), GFP_KERNEL); + if (!dl) + return NULL; + + spin_lock_init(&dl->lock); + + dl->vsp1 = vsp1; + dl->size = VSP1_DL_BODY_SIZE * ARRAY_SIZE(dl->lists.all); + + dl->mem = dma_alloc_writecombine(vsp1->dev, dl->size, &dl->dma, + GFP_KERNEL); + if (!dl->mem) { + kfree(dl); + return NULL; + } + + for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i) { + struct vsp1_dl_list *list = &dl->lists.all[i]; + + list->size = VSP1_DL_BODY_SIZE; + list->reg_count = 0; + list->in_use = false; + list->dma = dl->dma + VSP1_DL_BODY_SIZE * i; + list->body = dl->mem + VSP1_DL_BODY_SIZE * i; + } + + return dl; +} + +void vsp1_dl_destroy(struct vsp1_dl *dl) +{ + dma_free_writecombine(dl->vsp1->dev, dl->size, dl->mem, dl->dma); + kfree(dl); +} diff --git a/drivers/media/platform/vsp1/vsp1_dl.h b/drivers/media/platform/vsp1/vsp1_dl.h new file mode 100644 index 000000000000..448c4250e54c --- /dev/null +++ b/drivers/media/platform/vsp1/vsp1_dl.h @@ -0,0 +1,42 @@ +/* + * vsp1_dl.h -- R-Car VSP1 Display List + * + * Copyright (C) 2015 Renesas Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef __VSP1_DL_H__ +#define __VSP1_DL_H__ + +#include "vsp1_entity.h" + +struct vsp1_device; +struct vsp1_dl; + +struct vsp1_dl *vsp1_dl_create(struct vsp1_device *vsp1); +void vsp1_dl_destroy(struct vsp1_dl *dl); + +void vsp1_dl_setup(struct vsp1_device *vsp1); + +void vsp1_dl_reset(struct vsp1_dl *dl); +void vsp1_dl_begin(struct vsp1_dl *dl); +void vsp1_dl_add(struct vsp1_entity *e, u32 reg, u32 data); +void vsp1_dl_commit(struct vsp1_dl *dl); + +void vsp1_dl_irq_display_start(struct vsp1_dl *dl); +void vsp1_dl_irq_frame_end(struct vsp1_dl *dl); + +static inline void vsp1_dl_mod_write(struct vsp1_entity *e, u32 reg, u32 data) +{ + if (e->vsp1->use_dl) + vsp1_dl_add(e, reg, data); + else + vsp1_write(e->vsp1, reg, data); +} + +#endif /* __VSP1_DL_H__ */ diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c new file mode 100644 index 000000000000..021fe5778cd1 --- /dev/null +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -0,0 +1,597 @@ +/* + * vsp1_drm.c -- R-Car VSP1 DRM API + * + * Copyright (C) 2015 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/vsp1.h> + +#include <media/media-entity.h> +#include <media/v4l2-subdev.h> + +#include "vsp1.h" +#include "vsp1_bru.h" +#include "vsp1_dl.h" +#include "vsp1_drm.h" +#include "vsp1_lif.h" +#include "vsp1_pipe.h" +#include "vsp1_rwpf.h" + +/* ----------------------------------------------------------------------------- + * Runtime Handling + */ + +static void vsp1_drm_pipeline_frame_end(struct vsp1_pipeline *pipe) +{ + unsigned long flags; + + spin_lock_irqsave(&pipe->irqlock, flags); + if (pipe->num_inputs) + vsp1_pipeline_run(pipe); + spin_unlock_irqrestore(&pipe->irqlock, flags); +} + +/* ----------------------------------------------------------------------------- + * DU Driver API + */ + +int vsp1_du_init(struct device *dev) +{ + struct vsp1_device *vsp1 = dev_get_drvdata(dev); + + if (!vsp1) + return -EPROBE_DEFER; + + return 0; +} +EXPORT_SYMBOL_GPL(vsp1_du_init); + +/** + * vsp1_du_setup_lif - Setup the output part of the VSP pipeline + * @dev: the VSP device + * @width: output frame width in pixels + * @height: output frame height in pixels + * + * Configure the output part of VSP DRM pipeline for the given frame @width and + * @height. This sets up formats on the BRU source pad, the WPF0 sink and source + * pads, and the LIF sink pad. + * + * As the media bus code on the BRU source pad is conditioned by the + * configuration of the BRU sink 0 pad, we also set up the formats on all BRU + * sinks, even if the configuration will be overwritten later by + * vsp1_du_setup_rpf(). This ensures that the BRU configuration is set to a well + * defined state. + * + * Return 0 on success or a negative error code on failure. + */ +int vsp1_du_setup_lif(struct device *dev, unsigned int width, + unsigned int height) +{ + struct vsp1_device *vsp1 = dev_get_drvdata(dev); + struct vsp1_pipeline *pipe = &vsp1->drm->pipe; + struct vsp1_bru *bru = vsp1->bru; + struct v4l2_subdev_format format; + unsigned int i; + int ret; + + dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n", + __func__, width, height); + + if (width == 0 || height == 0) { + /* Zero width or height means the CRTC is being disabled, stop + * the pipeline and turn the light off. + */ + ret = vsp1_pipeline_stop(pipe); + if (ret == -ETIMEDOUT) + dev_err(vsp1->dev, "DRM pipeline stop timeout\n"); + + media_entity_pipeline_stop(&pipe->output->entity.subdev.entity); + + for (i = 0; i < bru->entity.source_pad; ++i) { + bru->inputs[i].rpf = NULL; + pipe->inputs[i] = NULL; + } + + pipe->num_inputs = 0; + + vsp1_device_put(vsp1); + + dev_dbg(vsp1->dev, "%s: pipeline disabled\n", __func__); + + return 0; + } + + vsp1_dl_reset(vsp1->drm->dl); + + /* Configure the format at the BRU sinks and propagate it through the + * pipeline. 
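+ * The propagation path follows the static DRM topology: BRU sinks, BRU
+ * source, WPF0 sink and source, and finally the LIF sink.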
+ */ + memset(&format, 0, sizeof(format)); + format.which = V4L2_SUBDEV_FORMAT_ACTIVE; + + for (i = 0; i < bru->entity.source_pad; ++i) { + format.pad = i; + + format.format.width = width; + format.format.height = height; + format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; + format.format.field = V4L2_FIELD_NONE; + + ret = v4l2_subdev_call(&bru->entity.subdev, pad, + set_fmt, NULL, &format); + if (ret < 0) + return ret; + + dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on BRU pad %u\n", + __func__, format.format.width, format.format.height, + format.format.code, i); + } + + format.pad = bru->entity.source_pad; + format.format.width = width; + format.format.height = height; + format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; + format.format.field = V4L2_FIELD_NONE; + + ret = v4l2_subdev_call(&bru->entity.subdev, pad, set_fmt, NULL, + &format); + if (ret < 0) + return ret; + + dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on BRU pad %u\n", + __func__, format.format.width, format.format.height, + format.format.code, i); + + format.pad = RWPF_PAD_SINK; + ret = v4l2_subdev_call(&vsp1->wpf[0]->entity.subdev, pad, set_fmt, NULL, + &format); + if (ret < 0) + return ret; + + dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on WPF0 sink\n", + __func__, format.format.width, format.format.height, + format.format.code); + + format.pad = RWPF_PAD_SOURCE; + ret = v4l2_subdev_call(&vsp1->wpf[0]->entity.subdev, pad, get_fmt, NULL, + &format); + if (ret < 0) + return ret; + + dev_dbg(vsp1->dev, "%s: got format %ux%u (%x) on WPF0 source\n", + __func__, format.format.width, format.format.height, + format.format.code); + + format.pad = LIF_PAD_SINK; + ret = v4l2_subdev_call(&vsp1->lif->entity.subdev, pad, set_fmt, NULL, + &format); + if (ret < 0) + return ret; + + dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on LIF sink\n", + __func__, format.format.width, format.format.height, + format.format.code); + + /* Verify that the format at the output of the pipeline matches the + * requested frame size and media bus code. + */ + if (format.format.width != width || format.format.height != height || + format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) { + dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__); + return -EPIPE; + } + + /* Mark the pipeline as streaming and enable the VSP1. This will store + * the pipeline pointer in all entities, which the s_stream handlers + * will need. We don't start the entities themselves right at this point + * as there's no plane configured yet, so we can't start processing + * buffers. + */ + ret = vsp1_device_get(vsp1); + if (ret < 0) + return ret; + + ret = media_entity_pipeline_start(&pipe->output->entity.subdev.entity, + &pipe->pipe); + if (ret < 0) { + dev_dbg(vsp1->dev, "%s: pipeline start failed\n", __func__); + vsp1_device_put(vsp1); + return ret; + } + + dev_dbg(vsp1->dev, "%s: pipeline enabled\n", __func__); + + return 0; +} +EXPORT_SYMBOL_GPL(vsp1_du_setup_lif); + +/** + * vsp1_du_atomic_begin - Prepare for an atomic update + * @dev: the VSP device + */ +void vsp1_du_atomic_begin(struct device *dev) +{ + struct vsp1_device *vsp1 = dev_get_drvdata(dev); + struct vsp1_pipeline *pipe = &vsp1->drm->pipe; + unsigned long flags; + + spin_lock_irqsave(&pipe->irqlock, flags); + + vsp1->drm->num_inputs = pipe->num_inputs; + + spin_unlock_irqrestore(&pipe->irqlock, flags); + + /* Prepare the display list. 
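+ * Illustrative call sequence from the DU driver side (argument names are
+ * placeholders):
+ *   vsp1_du_atomic_begin(dev);
+ *   vsp1_du_atomic_update(dev, rpf, pixfmt, pitch, mem, &src, &dst);
+ *   vsp1_du_atomic_flush(dev);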
*/ + vsp1_dl_begin(vsp1->drm->dl); +} +EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin); + +/** + * vsp1_du_atomic_update - Setup one RPF input of the VSP pipeline + * @dev: the VSP device + * @rpf_index: index of the RPF to setup (0-based) + * @pixelformat: V4L2 pixel format for the RPF memory input + * @pitch: number of bytes per line in the image stored in memory + * @mem: DMA addresses of the memory buffers (one per plane) + * @src: the source crop rectangle for the RPF + * @dst: the destination compose rectangle for the BRU input + * + * Configure the VSP to perform composition of the image referenced by @mem + * through RPF @rpf_index, using the @src crop rectangle and the @dst + * composition rectangle. The Z-order is fixed with RPF 0 at the bottom. + * + * Image format as stored in memory is expressed as a V4L2 @pixelformat value. + * As a special case, setting the pixel format to 0 will disable the RPF. The + * @pitch, @mem, @src and @dst parameters are ignored in that case. Calling the + * function on a disabled RPF is allowed. + * + * The memory pitch is configurable to allow for padding at end of lines, or + * simple for images that extend beyond the crop rectangle boundaries. The + * @pitch value is expressed in bytes and applies to all planes for multiplanar + * formats. + * + * The source memory buffer is referenced by the DMA address of its planes in + * the @mem array. Up to two planes are supported. The second plane DMA address + * is ignored for formats using a single plane. + * + * This function isn't reentrant, the caller needs to serialize calls. + * + * TODO: Implement Z-order control by decoupling the RPF index from the BRU + * input index. + * + * Return 0 on success or a negative error code on failure. + */ +int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index, + u32 pixelformat, unsigned int pitch, + dma_addr_t mem[2], const struct v4l2_rect *src, + const struct v4l2_rect *dst) +{ + struct vsp1_device *vsp1 = dev_get_drvdata(dev); + struct vsp1_pipeline *pipe = &vsp1->drm->pipe; + const struct vsp1_format_info *fmtinfo; + struct v4l2_subdev_selection sel; + struct v4l2_subdev_format format; + struct vsp1_rwpf_memory memory; + struct vsp1_rwpf *rpf; + unsigned long flags; + int ret; + + if (rpf_index >= vsp1->info->rpf_count) + return -EINVAL; + + rpf = vsp1->rpf[rpf_index]; + + if (pixelformat == 0) { + dev_dbg(vsp1->dev, "%s: RPF%u: disable requested\n", __func__, + rpf_index); + + spin_lock_irqsave(&pipe->irqlock, flags); + + if (pipe->inputs[rpf_index]) { + /* Remove the RPF from the pipeline if it was previously + * enabled. + */ + vsp1->bru->inputs[rpf_index].rpf = NULL; + pipe->inputs[rpf_index] = NULL; + + pipe->num_inputs--; + } + + spin_unlock_irqrestore(&pipe->irqlock, flags); + + return 0; + } + + dev_dbg(vsp1->dev, + "%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad }\n", + __func__, rpf_index, + src->left, src->top, src->width, src->height, + dst->left, dst->top, dst->width, dst->height, + pixelformat, pitch, &mem[0], &mem[1]); + + /* Set the stride at the RPF input. */ + fmtinfo = vsp1_get_format_info(pixelformat); + if (!fmtinfo) { + dev_dbg(vsp1->dev, "Unsupport pixel format %08x for RPF\n", + pixelformat); + return -EINVAL; + } + + rpf->fmtinfo = fmtinfo; + rpf->format.num_planes = fmtinfo->planes; + rpf->format.plane_fmt[0].bytesperline = pitch; + rpf->format.plane_fmt[1].bytesperline = pitch; + + /* Configure the format on the RPF sink pad and propagate it up to the + * BRU sink pad. 
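+ * The sink format covers the full frame (crop offset plus crop size); the
+ * crop rectangle itself is applied through the sink selection right below.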
+ */ + memset(&format, 0, sizeof(format)); + format.which = V4L2_SUBDEV_FORMAT_ACTIVE; + format.pad = RWPF_PAD_SINK; + format.format.width = src->width + src->left; + format.format.height = src->height + src->top; + format.format.code = fmtinfo->mbus; + format.format.field = V4L2_FIELD_NONE; + + ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL, + &format); + if (ret < 0) + return ret; + + dev_dbg(vsp1->dev, + "%s: set format %ux%u (%x) on RPF%u sink\n", + __func__, format.format.width, format.format.height, + format.format.code, rpf->entity.index); + + memset(&sel, 0, sizeof(sel)); + sel.which = V4L2_SUBDEV_FORMAT_ACTIVE; + sel.pad = RWPF_PAD_SINK; + sel.target = V4L2_SEL_TGT_CROP; + sel.r = *src; + + ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_selection, NULL, + &sel); + if (ret < 0) + return ret; + + dev_dbg(vsp1->dev, + "%s: set selection (%u,%u)/%ux%u on RPF%u sink\n", + __func__, sel.r.left, sel.r.top, sel.r.width, sel.r.height, + rpf->entity.index); + + /* RPF source, hardcode the format to ARGB8888 to turn on format + * conversion if needed. + */ + format.pad = RWPF_PAD_SOURCE; + + ret = v4l2_subdev_call(&rpf->entity.subdev, pad, get_fmt, NULL, + &format); + if (ret < 0) + return ret; + + dev_dbg(vsp1->dev, + "%s: got format %ux%u (%x) on RPF%u source\n", + __func__, format.format.width, format.format.height, + format.format.code, rpf->entity.index); + + format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; + + ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL, + &format); + if (ret < 0) + return ret; + + /* BRU sink, propagate the format from the RPF source. */ + format.pad = rpf->entity.index; + + ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_fmt, NULL, + &format); + if (ret < 0) + return ret; + + dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on BRU pad %u\n", + __func__, format.format.width, format.format.height, + format.format.code, format.pad); + + sel.pad = rpf->entity.index; + sel.target = V4L2_SEL_TGT_COMPOSE; + sel.r = *dst; + + ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_selection, + NULL, &sel); + if (ret < 0) + return ret; + + dev_dbg(vsp1->dev, + "%s: set selection (%u,%u)/%ux%u on BRU pad %u\n", + __func__, sel.r.left, sel.r.top, sel.r.width, sel.r.height, + sel.pad); + + /* Store the compose rectangle coordinates in the RPF. */ + rpf->location.left = dst->left; + rpf->location.top = dst->top; + + /* Set the memory buffer address. */ + memory.num_planes = fmtinfo->planes; + memory.addr[0] = mem[0]; + memory.addr[1] = mem[1]; + + rpf->ops->set_memory(rpf, &memory); + + spin_lock_irqsave(&pipe->irqlock, flags); + + /* If the RPF was previously stopped set the BRU input to the RPF and + * store the RPF in the pipeline inputs array. + */ + if (!pipe->inputs[rpf->entity.index]) { + vsp1->bru->inputs[rpf_index].rpf = rpf; + pipe->inputs[rpf->entity.index] = rpf; + pipe->num_inputs++; + } + + spin_unlock_irqrestore(&pipe->irqlock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(vsp1_du_atomic_update); + +/** + * vsp1_du_atomic_flush - Commit an atomic update + * @dev: the VSP device + */ +void vsp1_du_atomic_flush(struct device *dev) +{ + struct vsp1_device *vsp1 = dev_get_drvdata(dev); + struct vsp1_pipeline *pipe = &vsp1->drm->pipe; + struct vsp1_entity *entity; + unsigned long flags; + bool stop = false; + int ret; + + list_for_each_entry(entity, &pipe->entities, list_pipe) { + /* Disconnect unused RPFs from the pipeline. 
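+ * An RPF with no pipeline input gets its DPR routing register set to
+ * VI6_DPR_NODE_UNUSED so it no longer feeds the BRU.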
*/ + if (entity->type == VSP1_ENTITY_RPF) { + struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev); + + if (!pipe->inputs[rpf->entity.index]) { + vsp1_mod_write(entity, entity->route->reg, + VI6_DPR_NODE_UNUSED); + continue; + } + } + + vsp1_entity_route_setup(entity); + + ret = v4l2_subdev_call(&entity->subdev, video, + s_stream, 1); + if (ret < 0) { + dev_err(vsp1->dev, + "DRM pipeline start failure on entity %s\n", + entity->subdev.name); + return; + } + } + + vsp1_dl_commit(vsp1->drm->dl); + + spin_lock_irqsave(&pipe->irqlock, flags); + + /* Start or stop the pipeline if needed. */ + if (!vsp1->drm->num_inputs && pipe->num_inputs) { + vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0); + vsp1_write(vsp1, VI6_DISP_IRQ_ENB, VI6_DISP_IRQ_ENB_DSTE); + vsp1_pipeline_run(pipe); + } else if (vsp1->drm->num_inputs && !pipe->num_inputs) { + stop = true; + } + + spin_unlock_irqrestore(&pipe->irqlock, flags); + + if (stop) { + vsp1_write(vsp1, VI6_DISP_IRQ_ENB, 0); + vsp1_pipeline_stop(pipe); + } +} +EXPORT_SYMBOL_GPL(vsp1_du_atomic_flush); + +/* ----------------------------------------------------------------------------- + * Initialization + */ + +int vsp1_drm_create_links(struct vsp1_device *vsp1) +{ + const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE; + unsigned int i; + int ret; + + /* VSPD instances require a BRU to perform composition and a LIF to + * output to the DU. + */ + if (!vsp1->bru || !vsp1->lif) + return -ENXIO; + + for (i = 0; i < vsp1->info->rpf_count; ++i) { + struct vsp1_rwpf *rpf = vsp1->rpf[i]; + + ret = media_create_pad_link(&rpf->entity.subdev.entity, + RWPF_PAD_SOURCE, + &vsp1->bru->entity.subdev.entity, + i, flags); + if (ret < 0) + return ret; + + rpf->entity.sink = &vsp1->bru->entity.subdev.entity; + rpf->entity.sink_pad = i; + } + + ret = media_create_pad_link(&vsp1->bru->entity.subdev.entity, + vsp1->bru->entity.source_pad, + &vsp1->wpf[0]->entity.subdev.entity, + RWPF_PAD_SINK, flags); + if (ret < 0) + return ret; + + vsp1->bru->entity.sink = &vsp1->wpf[0]->entity.subdev.entity; + vsp1->bru->entity.sink_pad = RWPF_PAD_SINK; + + ret = media_create_pad_link(&vsp1->wpf[0]->entity.subdev.entity, + RWPF_PAD_SOURCE, + &vsp1->lif->entity.subdev.entity, + LIF_PAD_SINK, flags); + if (ret < 0) + return ret; + + return 0; +} + +int vsp1_drm_init(struct vsp1_device *vsp1) +{ + struct vsp1_pipeline *pipe; + unsigned int i; + + vsp1->drm = devm_kzalloc(vsp1->dev, sizeof(*vsp1->drm), GFP_KERNEL); + if (!vsp1->drm) + return -ENOMEM; + + vsp1->drm->dl = vsp1_dl_create(vsp1); + if (!vsp1->drm->dl) + return -ENOMEM; + + pipe = &vsp1->drm->pipe; + + vsp1_pipeline_init(pipe); + pipe->frame_end = vsp1_drm_pipeline_frame_end; + + /* The DRM pipeline is static, add entities manually. 
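+ * Entities are added in processing order (RPFs, then BRU, WPF0 and LIF),
+ * matching the fixed topology created by vsp1_drm_create_links().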
*/ + for (i = 0; i < vsp1->info->rpf_count; ++i) { + struct vsp1_rwpf *input = vsp1->rpf[i]; + + list_add_tail(&input->entity.list_pipe, &pipe->entities); + } + + list_add_tail(&vsp1->bru->entity.list_pipe, &pipe->entities); + list_add_tail(&vsp1->wpf[0]->entity.list_pipe, &pipe->entities); + list_add_tail(&vsp1->lif->entity.list_pipe, &pipe->entities); + + pipe->bru = &vsp1->bru->entity; + pipe->lif = &vsp1->lif->entity; + pipe->output = vsp1->wpf[0]; + + pipe->dl = vsp1->drm->dl; + + return 0; +} + +void vsp1_drm_cleanup(struct vsp1_device *vsp1) +{ + vsp1_dl_destroy(vsp1->drm->dl); +} diff --git a/drivers/media/platform/vsp1/vsp1_drm.h b/drivers/media/platform/vsp1/vsp1_drm.h new file mode 100644 index 000000000000..f68056838319 --- /dev/null +++ b/drivers/media/platform/vsp1/vsp1_drm.h @@ -0,0 +1,49 @@ +/* + * vsp1_drm.h -- R-Car VSP1 DRM/KMS Interface + * + * Copyright (C) 2015 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef __VSP1_DRM_H__ +#define __VSP1_DRM_H__ + +#include "vsp1_pipe.h" + +struct vsp1_dl; + +/** + * vsp1_drm - State for the API exposed to the DRM driver + * @dl: display list for DRM pipeline operation + * @pipe: the VSP1 pipeline used for display + * @num_inputs: number of active pipeline inputs at the beginning of an update + * @update: the pipeline configuration has been updated + */ +struct vsp1_drm { + struct vsp1_dl *dl; + struct vsp1_pipeline pipe; + unsigned int num_inputs; + bool update; +}; + +int vsp1_drm_init(struct vsp1_device *vsp1); +void vsp1_drm_cleanup(struct vsp1_device *vsp1); +int vsp1_drm_create_links(struct vsp1_device *vsp1); + +int vsp1_du_init(struct device *dev); +int vsp1_du_setup_lif(struct device *dev, unsigned int width, + unsigned int height); +void vsp1_du_atomic_begin(struct device *dev); +int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index, + u32 pixelformat, unsigned int pitch, + dma_addr_t mem[2], const struct v4l2_rect *src, + const struct v4l2_rect *dst); +void vsp1_du_atomic_flush(struct device *dev); + + +#endif /* __VSP1_DRM_H__ */ diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c index 533bc796391e..25750a0e4631 100644 --- a/drivers/media/platform/vsp1/vsp1_drv.c +++ b/drivers/media/platform/vsp1/vsp1_drv.c @@ -17,17 +17,23 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/videodev2.h> +#include <media/v4l2-subdev.h> + #include "vsp1.h" #include "vsp1_bru.h" +#include "vsp1_dl.h" +#include "vsp1_drm.h" #include "vsp1_hsit.h" #include "vsp1_lif.h" #include "vsp1_lut.h" #include "vsp1_rwpf.h" #include "vsp1_sru.h" #include "vsp1_uds.h" +#include "vsp1_video.h" /* ----------------------------------------------------------------------------- * Interrupt Handling @@ -39,11 +45,11 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data) struct vsp1_device *vsp1 = data; irqreturn_t ret = IRQ_NONE; unsigned int i; + u32 status; - for (i = 0; i < vsp1->pdata.wpf_count; ++i) { + for (i = 0; i < vsp1->info->wpf_count; ++i) { struct vsp1_rwpf *wpf = vsp1->wpf[i]; struct vsp1_pipeline *pipe; - u32 status; if (wpf == NULL) continue; @@ -58,6 
+64,21 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data) } } + status = vsp1_read(vsp1, VI6_DISP_IRQ_STA); + vsp1_write(vsp1, VI6_DISP_IRQ_STA, ~status & VI6_DISP_IRQ_STA_DST); + + if (status & VI6_DISP_IRQ_STA_DST) { + struct vsp1_rwpf *wpf = vsp1->wpf[0]; + struct vsp1_pipeline *pipe; + + if (wpf) { + pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity); + vsp1_pipeline_display_start(pipe); + } + + ret = IRQ_HANDLED; + } + return ret; } @@ -66,7 +87,7 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data) */ /* - * vsp1_create_links - Create links from all sources to the given sink + * vsp1_create_sink_links - Create links from all sources to the given sink * * This function creates media links from all valid sources to the given sink * pad. Links that would be invalid according to the VSP1 hardware capabilities @@ -75,7 +96,8 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data) * - from a UDS to a UDS (UDS entities can't be chained) * - from an entity to itself (no loops are allowed) */ -static int vsp1_create_links(struct vsp1_device *vsp1, struct vsp1_entity *sink) +static int vsp1_create_sink_links(struct vsp1_device *vsp1, + struct vsp1_entity *sink) { struct media_entity *entity = &sink->subdev.entity; struct vsp1_entity *source; @@ -115,19 +137,86 @@ static int vsp1_create_links(struct vsp1_device *vsp1, struct vsp1_entity *sink) return 0; } -static void vsp1_destroy_entities(struct vsp1_device *vsp1) +static int vsp1_uapi_create_links(struct vsp1_device *vsp1) { struct vsp1_entity *entity; - struct vsp1_entity *next; + unsigned int i; + int ret; + + list_for_each_entry(entity, &vsp1->entities, list_dev) { + if (entity->type == VSP1_ENTITY_LIF || + entity->type == VSP1_ENTITY_RPF) + continue; + + ret = vsp1_create_sink_links(vsp1, entity); + if (ret < 0) + return ret; + } + + if (vsp1->info->features & VSP1_HAS_LIF) { + ret = media_create_pad_link(&vsp1->wpf[0]->entity.subdev.entity, + RWPF_PAD_SOURCE, + &vsp1->lif->entity.subdev.entity, + LIF_PAD_SINK, 0); + if (ret < 0) + return ret; + } + + for (i = 0; i < vsp1->info->rpf_count; ++i) { + struct vsp1_rwpf *rpf = vsp1->rpf[i]; - list_for_each_entry_safe(entity, next, &vsp1->entities, list_dev) { + ret = media_create_pad_link(&rpf->video->video.entity, 0, + &rpf->entity.subdev.entity, + RWPF_PAD_SINK, + MEDIA_LNK_FL_ENABLED | + MEDIA_LNK_FL_IMMUTABLE); + if (ret < 0) + return ret; + } + + for (i = 0; i < vsp1->info->wpf_count; ++i) { + /* Connect the video device to the WPF. All connections are + * immutable except for the WPF0 source link if a LIF is + * present. 
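+ * Keeping that link mutable allows re-routing WPF0 between memory output
+ * and the (initially disabled) WPF0 -> LIF display path.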
+ */ + struct vsp1_rwpf *wpf = vsp1->wpf[i]; + unsigned int flags = MEDIA_LNK_FL_ENABLED; + + if (!(vsp1->info->features & VSP1_HAS_LIF) || i != 0) + flags |= MEDIA_LNK_FL_IMMUTABLE; + + ret = media_create_pad_link(&wpf->entity.subdev.entity, + RWPF_PAD_SOURCE, + &wpf->video->video.entity, 0, + flags); + if (ret < 0) + return ret; + } + + return 0; +} + +static void vsp1_destroy_entities(struct vsp1_device *vsp1) +{ + struct vsp1_entity *entity, *_entity; + struct vsp1_video *video, *_video; + + list_for_each_entry_safe(entity, _entity, &vsp1->entities, list_dev) { list_del(&entity->list_dev); vsp1_entity_destroy(entity); } + list_for_each_entry_safe(video, _video, &vsp1->videos, list) { + list_del(&video->list); + vsp1_video_cleanup(video); + } + v4l2_device_unregister(&vsp1->v4l2_dev); media_device_unregister(&vsp1->media_dev); media_device_cleanup(&vsp1->media_dev); + + if (!vsp1->info->uapi) + vsp1_drm_cleanup(vsp1); } static int vsp1_create_entities(struct vsp1_device *vsp1) @@ -144,6 +233,14 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) dev_name(mdev->dev)); media_device_init(mdev); + vsp1->media_ops.link_setup = vsp1_entity_link_setup; + /* Don't perform link validation when the userspace API is disabled as + * the pipeline is configured internally by the driver in that case, and + * its configuration can thus be trusted. + */ + if (vsp1->info->uapi) + vsp1->media_ops.link_validate = v4l2_subdev_link_validate; + vdev->mdev = mdev; ret = v4l2_device_register(vsp1->dev, vdev); if (ret < 0) { @@ -153,13 +250,15 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) } /* Instantiate all the entities. */ - vsp1->bru = vsp1_bru_create(vsp1); - if (IS_ERR(vsp1->bru)) { - ret = PTR_ERR(vsp1->bru); - goto done; - } + if (vsp1->info->features & VSP1_HAS_BRU) { + vsp1->bru = vsp1_bru_create(vsp1); + if (IS_ERR(vsp1->bru)) { + ret = PTR_ERR(vsp1->bru); + goto done; + } - list_add_tail(&vsp1->bru->entity.list_dev, &vsp1->entities); + list_add_tail(&vsp1->bru->entity.list_dev, &vsp1->entities); + } vsp1->hsi = vsp1_hsit_create(vsp1, true); if (IS_ERR(vsp1->hsi)) { @@ -177,7 +276,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities); - if (vsp1->pdata.features & VSP1_HAS_LIF) { + if (vsp1->info->features & VSP1_HAS_LIF) { vsp1->lif = vsp1_lif_create(vsp1); if (IS_ERR(vsp1->lif)) { ret = PTR_ERR(vsp1->lif); @@ -187,7 +286,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) list_add_tail(&vsp1->lif->entity.list_dev, &vsp1->entities); } - if (vsp1->pdata.features & VSP1_HAS_LUT) { + if (vsp1->info->features & VSP1_HAS_LUT) { vsp1->lut = vsp1_lut_create(vsp1); if (IS_ERR(vsp1->lut)) { ret = PTR_ERR(vsp1->lut); @@ -197,7 +296,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) list_add_tail(&vsp1->lut->entity.list_dev, &vsp1->entities); } - for (i = 0; i < vsp1->pdata.rpf_count; ++i) { + for (i = 0; i < vsp1->info->rpf_count; ++i) { struct vsp1_rwpf *rpf; rpf = vsp1_rpf_create(vsp1, i); @@ -208,9 +307,20 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) vsp1->rpf[i] = rpf; list_add_tail(&rpf->entity.list_dev, &vsp1->entities); + + if (vsp1->info->uapi) { + struct vsp1_video *video = vsp1_video_create(vsp1, rpf); + + if (IS_ERR(video)) { + ret = PTR_ERR(video); + goto done; + } + + list_add_tail(&video->list, &vsp1->videos); + } } - if (vsp1->pdata.features & VSP1_HAS_SRU) { + if (vsp1->info->features & VSP1_HAS_SRU) { vsp1->sru = vsp1_sru_create(vsp1); if (IS_ERR(vsp1->sru)) { ret 
= PTR_ERR(vsp1->sru); @@ -220,7 +330,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) list_add_tail(&vsp1->sru->entity.list_dev, &vsp1->entities); } - for (i = 0; i < vsp1->pdata.uds_count; ++i) { + for (i = 0; i < vsp1->info->uds_count; ++i) { struct vsp1_uds *uds; uds = vsp1_uds_create(vsp1, i); @@ -233,7 +343,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) list_add_tail(&uds->entity.list_dev, &vsp1->entities); } - for (i = 0; i < vsp1->pdata.wpf_count; ++i) { + for (i = 0; i < vsp1->info->wpf_count; ++i) { struct vsp1_rwpf *wpf; wpf = vsp1_wpf_create(vsp1, i); @@ -244,6 +354,18 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) vsp1->wpf[i] = wpf; list_add_tail(&wpf->entity.list_dev, &vsp1->entities); + + if (vsp1->info->uapi) { + struct vsp1_video *video = vsp1_video_create(vsp1, wpf); + + if (IS_ERR(video)) { + ret = PTR_ERR(video); + goto done; + } + + list_add_tail(&video->list, &vsp1->videos); + wpf->entity.sink = &video->video.entity; + } } /* Register all subdevs. */ @@ -255,34 +377,23 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) } /* Create links. */ - list_for_each_entry(entity, &vsp1->entities, list_dev) { - if (entity->type == VSP1_ENTITY_WPF) { - ret = vsp1_wpf_create_links(vsp1, entity); - if (ret < 0) - goto done; - } else if (entity->type == VSP1_ENTITY_RPF) { - ret = vsp1_rpf_create_links(vsp1, entity); - if (ret < 0) - goto done; - } - - if (entity->type != VSP1_ENTITY_LIF && - entity->type != VSP1_ENTITY_RPF) { - ret = vsp1_create_links(vsp1, entity); - if (ret < 0) - goto done; - } - } + if (vsp1->info->uapi) + ret = vsp1_uapi_create_links(vsp1); + else + ret = vsp1_drm_create_links(vsp1); + if (ret < 0) + goto done; - if (vsp1->pdata.features & VSP1_HAS_LIF) { - ret = media_create_pad_link( - &vsp1->wpf[0]->entity.subdev.entity, RWPF_PAD_SOURCE, - &vsp1->lif->entity.subdev.entity, LIF_PAD_SINK, 0); - if (ret < 0) - return ret; + /* Register subdev nodes if the userspace API is enabled or initialize + * the DRM pipeline otherwise. + */ + if (vsp1->info->uapi) { + vsp1->use_dl = false; + ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev); + } else { + vsp1->use_dl = true; + ret = vsp1_drm_init(vsp1); } - - ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev); if (ret < 0) goto done; @@ -295,42 +406,51 @@ done: return ret; } -static int vsp1_device_init(struct vsp1_device *vsp1) +int vsp1_reset_wpf(struct vsp1_device *vsp1, unsigned int index) { - unsigned int i; + unsigned int timeout; u32 status; - /* Reset any channel that might be running. 
*/ status = vsp1_read(vsp1, VI6_STATUS); + if (!(status & VI6_STATUS_SYS_ACT(index))) + return 0; - for (i = 0; i < vsp1->pdata.wpf_count; ++i) { - unsigned int timeout; + vsp1_write(vsp1, VI6_SRESET, VI6_SRESET_SRTS(index)); + for (timeout = 10; timeout > 0; --timeout) { + status = vsp1_read(vsp1, VI6_STATUS); + if (!(status & VI6_STATUS_SYS_ACT(index))) + break; - if (!(status & VI6_STATUS_SYS_ACT(i))) - continue; + usleep_range(1000, 2000); + } - vsp1_write(vsp1, VI6_SRESET, VI6_SRESET_SRTS(i)); - for (timeout = 10; timeout > 0; --timeout) { - status = vsp1_read(vsp1, VI6_STATUS); - if (!(status & VI6_STATUS_SYS_ACT(i))) - break; + if (!timeout) { + dev_err(vsp1->dev, "failed to reset wpf.%u\n", index); + return -ETIMEDOUT; + } - usleep_range(1000, 2000); - } + return 0; +} - if (!timeout) { - dev_err(vsp1->dev, "failed to reset wpf.%u\n", i); - return -ETIMEDOUT; - } +static int vsp1_device_init(struct vsp1_device *vsp1) +{ + unsigned int i; + int ret; + + /* Reset any channel that might be running. */ + for (i = 0; i < vsp1->info->wpf_count; ++i) { + ret = vsp1_reset_wpf(vsp1, i); + if (ret < 0) + return ret; } vsp1_write(vsp1, VI6_CLK_DCSWT, (8 << VI6_CLK_DCSWT_CSTPW_SHIFT) | (8 << VI6_CLK_DCSWT_CSTRW_SHIFT)); - for (i = 0; i < vsp1->pdata.rpf_count; ++i) + for (i = 0; i < vsp1->info->rpf_count; ++i) vsp1_write(vsp1, VI6_DPR_RPF_ROUTE(i), VI6_DPR_NODE_UNUSED); - for (i = 0; i < vsp1->pdata.uds_count; ++i) + for (i = 0; i < vsp1->info->uds_count; ++i) vsp1_write(vsp1, VI6_DPR_UDS_ROUTE(i), VI6_DPR_NODE_UNUSED); vsp1_write(vsp1, VI6_DPR_SRU_ROUTE, VI6_DPR_NODE_UNUSED); @@ -345,6 +465,9 @@ static int vsp1_device_init(struct vsp1_device *vsp1) vsp1_write(vsp1, VI6_DPR_HGT_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) | (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT)); + if (vsp1->use_dl) + vsp1_dl_setup(vsp1); + return 0; } @@ -444,48 +567,76 @@ static const struct dev_pm_ops vsp1_pm_ops = { * Platform Driver */ -static int vsp1_parse_dt(struct vsp1_device *vsp1) -{ - struct device_node *np = vsp1->dev->of_node; - struct vsp1_platform_data *pdata = &vsp1->pdata; - - if (of_property_read_bool(np, "renesas,has-lif")) - pdata->features |= VSP1_HAS_LIF; - if (of_property_read_bool(np, "renesas,has-lut")) - pdata->features |= VSP1_HAS_LUT; - if (of_property_read_bool(np, "renesas,has-sru")) - pdata->features |= VSP1_HAS_SRU; - - of_property_read_u32(np, "renesas,#rpf", &pdata->rpf_count); - of_property_read_u32(np, "renesas,#uds", &pdata->uds_count); - of_property_read_u32(np, "renesas,#wpf", &pdata->wpf_count); - - if (pdata->rpf_count <= 0 || pdata->rpf_count > VSP1_MAX_RPF) { - dev_err(vsp1->dev, "invalid number of RPF (%u)\n", - pdata->rpf_count); - return -EINVAL; - } - - if (pdata->uds_count <= 0 || pdata->uds_count > VSP1_MAX_UDS) { - dev_err(vsp1->dev, "invalid number of UDS (%u)\n", - pdata->uds_count); - return -EINVAL; - } - - if (pdata->wpf_count <= 0 || pdata->wpf_count > VSP1_MAX_WPF) { - dev_err(vsp1->dev, "invalid number of WPF (%u)\n", - pdata->wpf_count); - return -EINVAL; - } - - return 0; -} +static const struct vsp1_device_info vsp1_device_infos[] = { + { + .version = VI6_IP_VERSION_MODEL_VSPS_H2, + .features = VSP1_HAS_BRU | VSP1_HAS_LUT | VSP1_HAS_SRU, + .rpf_count = 5, + .uds_count = 3, + .wpf_count = 4, + .num_bru_inputs = 4, + .uapi = true, + }, { + .version = VI6_IP_VERSION_MODEL_VSPR_H2, + .features = VSP1_HAS_BRU | VSP1_HAS_SRU, + .rpf_count = 5, + .uds_count = 1, + .wpf_count = 4, + .num_bru_inputs = 4, + .uapi = true, + }, { + .version = VI6_IP_VERSION_MODEL_VSPD_GEN2, 
+ .features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_LUT, + .rpf_count = 4, + .uds_count = 1, + .wpf_count = 4, + .num_bru_inputs = 4, + .uapi = true, + }, { + .version = VI6_IP_VERSION_MODEL_VSPS_M2, + .features = VSP1_HAS_BRU | VSP1_HAS_LUT | VSP1_HAS_SRU, + .rpf_count = 5, + .uds_count = 3, + .wpf_count = 4, + .num_bru_inputs = 4, + .uapi = true, + }, { + .version = VI6_IP_VERSION_MODEL_VSPI_GEN3, + .features = VSP1_HAS_LUT | VSP1_HAS_SRU, + .rpf_count = 1, + .uds_count = 1, + .wpf_count = 1, + .uapi = true, + }, { + .version = VI6_IP_VERSION_MODEL_VSPBD_GEN3, + .features = VSP1_HAS_BRU, + .rpf_count = 5, + .wpf_count = 1, + .num_bru_inputs = 5, + .uapi = true, + }, { + .version = VI6_IP_VERSION_MODEL_VSPBC_GEN3, + .features = VSP1_HAS_BRU | VSP1_HAS_LUT, + .rpf_count = 5, + .wpf_count = 1, + .num_bru_inputs = 5, + .uapi = true, + }, { + .version = VI6_IP_VERSION_MODEL_VSPD_GEN3, + .features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_LUT, + .rpf_count = 5, + .wpf_count = 2, + .num_bru_inputs = 5, + }, +}; static int vsp1_probe(struct platform_device *pdev) { struct vsp1_device *vsp1; struct resource *irq; struct resource *io; + unsigned int i; + u32 version; int ret; vsp1 = devm_kzalloc(&pdev->dev, sizeof(*vsp1), GFP_KERNEL); @@ -495,10 +646,7 @@ static int vsp1_probe(struct platform_device *pdev) vsp1->dev = &pdev->dev; mutex_init(&vsp1->lock); INIT_LIST_HEAD(&vsp1->entities); - - ret = vsp1_parse_dt(vsp1); - if (ret < 0) - return ret; + INIT_LIST_HEAD(&vsp1->videos); /* I/O, IRQ and clock resources */ io = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -525,6 +673,29 @@ static int vsp1_probe(struct platform_device *pdev) return ret; } + /* Configure device parameters based on the version register. */ + ret = clk_prepare_enable(vsp1->clock); + if (ret < 0) + return ret; + + version = vsp1_read(vsp1, VI6_IP_VERSION); + clk_disable_unprepare(vsp1->clock); + + for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) { + if ((version & VI6_IP_VERSION_MODEL_MASK) == + vsp1_device_infos[i].version) { + vsp1->info = &vsp1_device_infos[i]; + break; + } + } + + if (!vsp1->info) { + dev_err(&pdev->dev, "unsupported IP version 0x%08x\n", version); + return -ENXIO; + } + + dev_dbg(&pdev->dev, "IP version 0x%08x\n", version); + /* Instanciate entities */ ret = vsp1_create_entities(vsp1); if (ret < 0) { @@ -548,6 +719,7 @@ static int vsp1_remove(struct platform_device *pdev) static const struct of_device_id vsp1_of_match[] = { { .compatible = "renesas,vsp1" }, + { .compatible = "renesas,vsp2" }, { }, }; diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c index d7308530952f..20a78fbd3691 100644 --- a/drivers/media/platform/vsp1/vsp1_entity.c +++ b/drivers/media/platform/vsp1/vsp1_entity.c @@ -20,7 +20,6 @@ #include "vsp1.h" #include "vsp1_entity.h" -#include "vsp1_video.h" bool vsp1_entity_is_streaming(struct vsp1_entity *entity) { @@ -46,7 +45,7 @@ int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming) if (!streaming) return 0; - if (!entity->subdev.ctrl_handler) + if (!entity->vsp1->info->uapi || !entity->subdev.ctrl_handler) return 0; ret = v4l2_ctrl_handler_setup(entity->subdev.ctrl_handler); @@ -59,6 +58,18 @@ int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming) return ret; } +void vsp1_entity_route_setup(struct vsp1_entity *source) +{ + struct vsp1_entity *sink; + + if (source->route->reg == 0) + return; + + sink = container_of(source->sink, struct vsp1_entity, subdev.entity); + vsp1_mod_write(source, 
source->route->reg, + sink->route->inputs[source->sink_pad]); +} + /* ----------------------------------------------------------------------------- * V4L2 Subdevice Operations */ @@ -120,9 +131,9 @@ const struct v4l2_subdev_internal_ops vsp1_subdev_internal_ops = { * Media Operations */ -static int vsp1_entity_link_setup(struct media_entity *entity, - const struct media_pad *local, - const struct media_pad *remote, u32 flags) +int vsp1_entity_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) { struct vsp1_entity *source; @@ -147,11 +158,6 @@ static int vsp1_entity_link_setup(struct media_entity *entity, return 0; } -const struct media_entity_operations vsp1_media_ops = { - .link_setup = vsp1_entity_link_setup, - .link_validate = v4l2_subdev_link_validate, -}; - /* ----------------------------------------------------------------------------- * Initialization */ @@ -159,7 +165,8 @@ const struct media_entity_operations vsp1_media_ops = { static const struct vsp1_route vsp1_routes[] = { { VSP1_ENTITY_BRU, 0, VI6_DPR_BRU_ROUTE, { VI6_DPR_NODE_BRU_IN(0), VI6_DPR_NODE_BRU_IN(1), - VI6_DPR_NODE_BRU_IN(2), VI6_DPR_NODE_BRU_IN(3), } }, + VI6_DPR_NODE_BRU_IN(2), VI6_DPR_NODE_BRU_IN(3), + VI6_DPR_NODE_BRU_IN(4) } }, { VSP1_ENTITY_HSI, 0, VI6_DPR_HSI_ROUTE, { VI6_DPR_NODE_HSI, } }, { VSP1_ENTITY_HST, 0, VI6_DPR_HST_ROUTE, { VI6_DPR_NODE_HST, } }, { VSP1_ENTITY_LIF, 0, 0, { VI6_DPR_NODE_LIF, } }, @@ -225,8 +232,6 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity, void vsp1_entity_destroy(struct vsp1_entity *entity) { - if (entity->video) - vsp1_video_cleanup(entity->video); if (entity->subdev.ctrl_handler) v4l2_ctrl_handler_free(entity->subdev.ctrl_handler); media_entity_cleanup(&entity->subdev.entity); diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h index 8867a5787c28..83570dfde8ec 100644 --- a/drivers/media/platform/vsp1/vsp1_entity.h +++ b/drivers/media/platform/vsp1/vsp1_entity.h @@ -19,7 +19,6 @@ #include <media/v4l2-subdev.h> struct vsp1_device; -struct vsp1_video; enum vsp1_entity_type { VSP1_ENTITY_BRU, @@ -33,6 +32,8 @@ enum vsp1_entity_type { VSP1_ENTITY_WPF, }; +#define VSP1_ENTITY_MAX_INPUTS 5 /* For the BRU */ + /* * struct vsp1_route - Entity routing configuration * @type: Entity type this routing entry is associated with @@ -49,7 +50,7 @@ struct vsp1_route { enum vsp1_entity_type type; unsigned int index; unsigned int reg; - unsigned int inputs[4]; + unsigned int inputs[VSP1_ENTITY_MAX_INPUTS]; }; struct vsp1_entity { @@ -71,8 +72,6 @@ struct vsp1_entity { struct v4l2_subdev subdev; struct v4l2_mbus_framefmt *formats; - struct vsp1_video *video; - spinlock_t lock; /* Protects the streaming field */ bool streaming; }; @@ -87,7 +86,10 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity, void vsp1_entity_destroy(struct vsp1_entity *entity); extern const struct v4l2_subdev_internal_ops vsp1_subdev_internal_ops; -extern const struct media_entity_operations vsp1_media_ops; + +int vsp1_entity_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags); struct v4l2_mbus_framefmt * vsp1_entity_get_pad_format(struct vsp1_entity *entity, @@ -99,4 +101,6 @@ void vsp1_entity_init_formats(struct v4l2_subdev *subdev, bool vsp1_entity_is_streaming(struct vsp1_entity *entity); int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming); +void 
vsp1_entity_route_setup(struct vsp1_entity *source); + #endif /* __VSP1_ENTITY_H__ */ diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c index 8ffb817ae525..c1087cff31a0 100644 --- a/drivers/media/platform/vsp1/vsp1_hsit.c +++ b/drivers/media/platform/vsp1/vsp1_hsit.c @@ -203,7 +203,7 @@ struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse) subdev = &hsit->entity.subdev; v4l2_subdev_init(subdev, &hsit_ops); - subdev->entity.ops = &vsp1_media_ops; + subdev->entity.ops = &vsp1->media_ops; subdev->internal_ops = &vsp1_subdev_internal_ops; snprintf(subdev->name, sizeof(subdev->name), "%s %s", dev_name(vsp1->dev), inverse ? "hsi" : "hst"); diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c index 39fa5ef20fbb..433853ce8dbf 100644 --- a/drivers/media/platform/vsp1/vsp1_lif.c +++ b/drivers/media/platform/vsp1/vsp1_lif.c @@ -26,14 +26,9 @@ * Device Access */ -static inline u32 vsp1_lif_read(struct vsp1_lif *lif, u32 reg) -{ - return vsp1_read(lif->entity.vsp1, reg); -} - static inline void vsp1_lif_write(struct vsp1_lif *lif, u32 reg, u32 data) { - vsp1_write(lif->entity.vsp1, reg, data); + vsp1_mod_write(&lif->entity, reg, data); } /* ----------------------------------------------------------------------------- @@ -49,7 +44,7 @@ static int lif_s_stream(struct v4l2_subdev *subdev, int enable) unsigned int lbth = 200; if (!enable) { - vsp1_lif_write(lif, VI6_LIF_CTRL, 0); + vsp1_write(lif->entity.vsp1, VI6_LIF_CTRL, 0); return 0; } @@ -228,7 +223,7 @@ struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1) subdev = &lif->entity.subdev; v4l2_subdev_init(subdev, &lif_ops); - subdev->entity.ops = &vsp1_media_ops; + subdev->entity.ops = &vsp1->media_ops; subdev->internal_ops = &vsp1_subdev_internal_ops; snprintf(subdev->name, sizeof(subdev->name), "%s lif", dev_name(vsp1->dev)); diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c index 656ec272a414..4b89095e7b5f 100644 --- a/drivers/media/platform/vsp1/vsp1_lut.c +++ b/drivers/media/platform/vsp1/vsp1_lut.c @@ -27,11 +27,6 @@ * Device Access */ -static inline u32 vsp1_lut_read(struct vsp1_lut *lut, u32 reg) -{ - return vsp1_read(lut->entity.vsp1, reg); -} - static inline void vsp1_lut_write(struct vsp1_lut *lut, u32 reg, u32 data) { vsp1_write(lut->entity.vsp1, reg, data); @@ -242,7 +237,7 @@ struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1) subdev = &lut->entity.subdev; v4l2_subdev_init(subdev, &lut_ops); - subdev->entity.ops = &vsp1_media_ops; + subdev->entity.ops = &vsp1->media_ops; subdev->internal_ops = &vsp1_subdev_internal_ops; snprintf(subdev->name, sizeof(subdev->name), "%s lut", dev_name(vsp1->dev)); diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c new file mode 100644 index 000000000000..6659f06b1643 --- /dev/null +++ b/drivers/media/platform/vsp1/vsp1_pipe.c @@ -0,0 +1,426 @@ +/* + * vsp1_pipe.c -- R-Car VSP1 Pipeline + * + * Copyright (C) 2013-2015 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/delay.h> +#include <linux/list.h> +#include <linux/sched.h> +#include <linux/wait.h> + +#include <media/media-entity.h> +#include <media/v4l2-subdev.h> + +#include "vsp1.h" +#include "vsp1_bru.h" +#include "vsp1_dl.h" +#include "vsp1_entity.h" +#include "vsp1_pipe.h" +#include "vsp1_rwpf.h" +#include "vsp1_uds.h" + +/* ----------------------------------------------------------------------------- + * Helper Functions + */ + +static const struct vsp1_format_info vsp1_video_formats[] = { + { V4L2_PIX_FMT_RGB332, MEDIA_BUS_FMT_ARGB8888_1X32, + VI6_FMT_RGB_332, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 1, { 8, 0, 0 }, false, false, 1, 1, false }, + { V4L2_PIX_FMT_ARGB444, MEDIA_BUS_FMT_ARGB8888_1X32, + VI6_FMT_ARGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS, + 1, { 16, 0, 0 }, false, false, 1, 1, true }, + { V4L2_PIX_FMT_XRGB444, MEDIA_BUS_FMT_ARGB8888_1X32, + VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS, + 1, { 16, 0, 0 }, false, false, 1, 1, true }, + { V4L2_PIX_FMT_ARGB555, MEDIA_BUS_FMT_ARGB8888_1X32, + VI6_FMT_ARGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS, + 1, { 16, 0, 0 }, false, false, 1, 1, true }, + { V4L2_PIX_FMT_XRGB555, MEDIA_BUS_FMT_ARGB8888_1X32, + VI6_FMT_XRGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS, + 1, { 16, 0, 0 }, false, false, 1, 1, false }, + { V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_ARGB8888_1X32, + VI6_FMT_RGB_565, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS, + 1, { 16, 0, 0 }, false, false, 1, 1, false }, + { V4L2_PIX_FMT_BGR24, MEDIA_BUS_FMT_ARGB8888_1X32, + VI6_FMT_BGR_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 1, { 24, 0, 0 }, false, false, 1, 1, false }, + { V4L2_PIX_FMT_RGB24, MEDIA_BUS_FMT_ARGB8888_1X32, + VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 1, { 24, 0, 0 }, false, false, 1, 1, false }, + { V4L2_PIX_FMT_ABGR32, MEDIA_BUS_FMT_ARGB8888_1X32, + VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS, + 1, { 32, 0, 0 }, false, false, 1, 1, true }, + { V4L2_PIX_FMT_XBGR32, MEDIA_BUS_FMT_ARGB8888_1X32, + VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS, + 1, { 32, 0, 0 }, false, false, 1, 1, false }, + { V4L2_PIX_FMT_ARGB32, MEDIA_BUS_FMT_ARGB8888_1X32, + VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 1, { 32, 0, 0 }, false, false, 1, 1, true }, + { V4L2_PIX_FMT_XRGB32, MEDIA_BUS_FMT_ARGB8888_1X32, + VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 1, { 32, 0, 0 }, false, false, 1, 1, false }, + { V4L2_PIX_FMT_UYVY, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 1, { 16, 0, 0 }, false, false, 2, 1, false }, + { V4L2_PIX_FMT_VYUY, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 1, { 16, 0, 0 }, false, true, 2, 1, false }, + { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 1, { 16, 0, 0 }, true, false, 2, 1, false }, + { V4L2_PIX_FMT_YVYU, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | 
VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 1, { 16, 0, 0 }, true, true, 2, 1, false }, + { V4L2_PIX_FMT_NV12M, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 2, { 8, 16, 0 }, false, false, 2, 2, false }, + { V4L2_PIX_FMT_NV21M, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 2, { 8, 16, 0 }, false, true, 2, 2, false }, + { V4L2_PIX_FMT_NV16M, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 2, { 8, 16, 0 }, false, false, 2, 1, false }, + { V4L2_PIX_FMT_NV61M, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 2, { 8, 16, 0 }, false, true, 2, 1, false }, + { V4L2_PIX_FMT_YUV420M, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 3, { 8, 8, 8 }, false, false, 2, 2, false }, + { V4L2_PIX_FMT_YVU420M, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 3, { 8, 8, 8 }, false, true, 2, 2, false }, + { V4L2_PIX_FMT_YUV422M, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_Y_U_V_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 3, { 8, 8, 8 }, false, false, 2, 1, false }, + { V4L2_PIX_FMT_YVU422M, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_Y_U_V_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 3, { 8, 8, 8 }, false, true, 2, 1, false }, + { V4L2_PIX_FMT_YUV444M, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_Y_U_V_444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 3, { 8, 8, 8 }, false, false, 1, 1, false }, + { V4L2_PIX_FMT_YVU444M, MEDIA_BUS_FMT_AYUV8_1X32, + VI6_FMT_Y_U_V_444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | + VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, + 3, { 8, 8, 8 }, false, true, 1, 1, false }, +}; + +/* + * vsp1_get_format_info - Retrieve format information for a 4CC + * @fourcc: the format 4CC + * + * Return a pointer to the format information structure corresponding to the + * given V4L2 format 4CC, or NULL if no corresponding format can be found. 
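+ * Example (illustrative): vsp1_get_format_info(V4L2_PIX_FMT_ARGB32) returns
+ * the single-plane 32-bit ARGB entry with the alpha flag set.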
+ */ +const struct vsp1_format_info *vsp1_get_format_info(u32 fourcc) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) { + const struct vsp1_format_info *info = &vsp1_video_formats[i]; + + if (info->fourcc == fourcc) + return info; + } + + return NULL; +} + +/* ----------------------------------------------------------------------------- + * Pipeline Management + */ + +void vsp1_pipeline_reset(struct vsp1_pipeline *pipe) +{ + unsigned int i; + + if (pipe->bru) { + struct vsp1_bru *bru = to_bru(&pipe->bru->subdev); + + for (i = 0; i < ARRAY_SIZE(bru->inputs); ++i) + bru->inputs[i].rpf = NULL; + } + + for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i) + pipe->inputs[i] = NULL; + + INIT_LIST_HEAD(&pipe->entities); + pipe->state = VSP1_PIPELINE_STOPPED; + pipe->buffers_ready = 0; + pipe->num_inputs = 0; + pipe->output = NULL; + pipe->bru = NULL; + pipe->lif = NULL; + pipe->uds = NULL; +} + +void vsp1_pipeline_init(struct vsp1_pipeline *pipe) +{ + mutex_init(&pipe->lock); + spin_lock_init(&pipe->irqlock); + init_waitqueue_head(&pipe->wq); + + INIT_LIST_HEAD(&pipe->entities); + pipe->state = VSP1_PIPELINE_STOPPED; +} + +void vsp1_pipeline_run(struct vsp1_pipeline *pipe) +{ + struct vsp1_device *vsp1 = pipe->output->entity.vsp1; + + if (pipe->state == VSP1_PIPELINE_STOPPED) { + vsp1_write(vsp1, VI6_CMD(pipe->output->entity.index), + VI6_CMD_STRCMD); + pipe->state = VSP1_PIPELINE_RUNNING; + } + + pipe->buffers_ready = 0; +} + +bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe) +{ + unsigned long flags; + bool stopped; + + spin_lock_irqsave(&pipe->irqlock, flags); + stopped = pipe->state == VSP1_PIPELINE_STOPPED; + spin_unlock_irqrestore(&pipe->irqlock, flags); + + return stopped; +} + +int vsp1_pipeline_stop(struct vsp1_pipeline *pipe) +{ + struct vsp1_entity *entity; + unsigned long flags; + int ret; + + if (pipe->dl) { + /* When using display lists in continuous frame mode the only + * way to stop the pipeline is to reset the hardware. + */ + ret = vsp1_reset_wpf(pipe->output->entity.vsp1, + pipe->output->entity.index); + if (ret == 0) { + spin_lock_irqsave(&pipe->irqlock, flags); + pipe->state = VSP1_PIPELINE_STOPPED; + spin_unlock_irqrestore(&pipe->irqlock, flags); + } + } else { + /* Otherwise just request a stop and wait. */ + spin_lock_irqsave(&pipe->irqlock, flags); + if (pipe->state == VSP1_PIPELINE_RUNNING) + pipe->state = VSP1_PIPELINE_STOPPING; + spin_unlock_irqrestore(&pipe->irqlock, flags); + + ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe), + msecs_to_jiffies(500)); + ret = ret == 0 ? -ETIMEDOUT : 0; + } + + list_for_each_entry(entity, &pipe->entities, list_pipe) { + if (entity->route && entity->route->reg) + vsp1_write(entity->vsp1, entity->route->reg, + VI6_DPR_NODE_UNUSED); + + v4l2_subdev_call(&entity->subdev, video, s_stream, 0); + } + + return ret; +} + +bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe) +{ + unsigned int mask; + + mask = ((1 << pipe->num_inputs) - 1) << 1; + if (!pipe->lif) + mask |= 1 << 0; + + return pipe->buffers_ready == mask; +} + +void vsp1_pipeline_display_start(struct vsp1_pipeline *pipe) +{ + if (pipe->dl) + vsp1_dl_irq_display_start(pipe->dl); +} + +void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe) +{ + enum vsp1_pipeline_state state; + unsigned long flags; + + if (pipe == NULL) + return; + + if (pipe->dl) + vsp1_dl_irq_frame_end(pipe->dl); + + /* Signal frame end to the pipeline handler. 
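+ * For the DRM pipeline this calls vsp1_drm_pipeline_frame_end(), which
+ * re-runs the pipeline as long as it still has inputs.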
*/ + pipe->frame_end(pipe); + + spin_lock_irqsave(&pipe->irqlock, flags); + + state = pipe->state; + + /* When using display lists in continuous frame mode the pipeline is + * automatically restarted by the hardware. + */ + if (!pipe->dl) + pipe->state = VSP1_PIPELINE_STOPPED; + + /* If a stop has been requested, mark the pipeline as stopped and + * return. + */ + if (state == VSP1_PIPELINE_STOPPING) { + wake_up(&pipe->wq); + goto done; + } + + /* Restart the pipeline if ready. */ + if (vsp1_pipeline_ready(pipe)) + vsp1_pipeline_run(pipe); + +done: + spin_unlock_irqrestore(&pipe->irqlock, flags); +} + +/* + * Propagate the alpha value through the pipeline. + * + * As the UDS has restricted scaling capabilities when the alpha component needs + * to be scaled, we disable alpha scaling when the UDS input has a fixed alpha + * value. The UDS then outputs a fixed alpha value which needs to be programmed + * from the input RPF alpha. + */ +void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe, + struct vsp1_entity *input, + unsigned int alpha) +{ + struct vsp1_entity *entity; + struct media_pad *pad; + + pad = media_entity_remote_pad(&input->pads[RWPF_PAD_SOURCE]); + + while (pad) { + if (!is_media_entity_v4l2_subdev(pad->entity)) + break; + + entity = to_vsp1_entity(media_entity_to_v4l2_subdev(pad->entity)); + + /* The BRU background color has a fixed alpha value set to 255, + * the output alpha value is thus always equal to 255. + */ + if (entity->type == VSP1_ENTITY_BRU) + alpha = 255; + + if (entity->type == VSP1_ENTITY_UDS) { + struct vsp1_uds *uds = to_uds(&entity->subdev); + + vsp1_uds_set_alpha(uds, alpha); + break; + } + + pad = &entity->pads[entity->source_pad]; + pad = media_entity_remote_pad(pad); + } +} + +void vsp1_pipelines_suspend(struct vsp1_device *vsp1) +{ + unsigned long flags; + unsigned int i; + int ret; + + /* To avoid increasing the system suspend time needlessly, loop over the + * pipelines twice, first to set them all to the stopping state, and + * then to wait for the stop to complete. + */ + for (i = 0; i < vsp1->info->wpf_count; ++i) { + struct vsp1_rwpf *wpf = vsp1->wpf[i]; + struct vsp1_pipeline *pipe; + + if (wpf == NULL) + continue; + + pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity); + if (pipe == NULL) + continue; + + spin_lock_irqsave(&pipe->irqlock, flags); + if (pipe->state == VSP1_PIPELINE_RUNNING) + pipe->state = VSP1_PIPELINE_STOPPING; + spin_unlock_irqrestore(&pipe->irqlock, flags); + } + + for (i = 0; i < vsp1->info->wpf_count; ++i) { + struct vsp1_rwpf *wpf = vsp1->wpf[i]; + struct vsp1_pipeline *pipe; + + if (wpf == NULL) + continue; + + pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity); + if (pipe == NULL) + continue; + + ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe), + msecs_to_jiffies(500)); + if (ret == 0) + dev_warn(vsp1->dev, "pipeline %u stop timeout\n", + wpf->entity.index); + } +} + +void vsp1_pipelines_resume(struct vsp1_device *vsp1) +{ + unsigned int i; + + /* Resume pipeline all running pipelines. 
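vsp1_pipeline_propagate_alpha() above walks the downstream chain from an RPF source pad: a BRU on the path forces the alpha to 255 because its background colour has a fixed alpha, and the first UDS consumes the value and ends the walk. The sketch below reproduces just those two rules over a plain array of entity types; the media-pad traversal and the vsp1_uds_set_alpha() call are deliberately left out.

#include <stdio.h>

enum ent_type { ENT_UDS, ENT_BRU, ENT_OTHER };

/* Walk the downstream chain and return the alpha value that would be
 * programmed into the first UDS, applying the BRU override on the way. */
static unsigned int propagate_alpha(const enum ent_type *chain, unsigned int len,
                                    unsigned int alpha)
{
        unsigned int i;

        for (i = 0; i < len; ++i) {
                if (chain[i] == ENT_BRU)
                        alpha = 255;    /* BRU background alpha is fixed at 255 */
                if (chain[i] == ENT_UDS)
                        break;          /* UDS consumes the propagated value */
        }

        return alpha;
}

int main(void)
{
        const enum ent_type rpf_to_wpf[] = { ENT_BRU, ENT_UDS, ENT_OTHER };

        /* The RPF alpha of 128 is overridden to 255 by the BRU before the UDS. */
        printf("alpha at UDS: %u\n", propagate_alpha(rpf_to_wpf, 3, 128));
        return 0;
}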
*/ + for (i = 0; i < vsp1->info->wpf_count; ++i) { + struct vsp1_rwpf *wpf = vsp1->wpf[i]; + struct vsp1_pipeline *pipe; + + if (wpf == NULL) + continue; + + pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity); + if (pipe == NULL) + continue; + + if (vsp1_pipeline_ready(pipe)) + vsp1_pipeline_run(pipe); + } +} diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/vsp1/vsp1_pipe.h new file mode 100644 index 000000000000..b2f3a8a896c9 --- /dev/null +++ b/drivers/media/platform/vsp1/vsp1_pipe.h @@ -0,0 +1,134 @@ +/* + * vsp1_pipe.h -- R-Car VSP1 Pipeline + * + * Copyright (C) 2013-2015 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef __VSP1_PIPE_H__ +#define __VSP1_PIPE_H__ + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/wait.h> + +#include <media/media-entity.h> + +struct vsp1_dl; +struct vsp1_rwpf; + +/* + * struct vsp1_format_info - VSP1 video format description + * @mbus: media bus format code + * @fourcc: V4L2 pixel format FCC identifier + * @planes: number of planes + * @bpp: bits per pixel + * @hwfmt: VSP1 hardware format + * @swap_yc: the Y and C components are swapped (Y comes before C) + * @swap_uv: the U and V components are swapped (V comes before U) + * @hsub: horizontal subsampling factor + * @vsub: vertical subsampling factor + * @alpha: has an alpha channel + */ +struct vsp1_format_info { + u32 fourcc; + unsigned int mbus; + unsigned int hwfmt; + unsigned int swap; + unsigned int planes; + unsigned int bpp[3]; + bool swap_yc; + bool swap_uv; + unsigned int hsub; + unsigned int vsub; + bool alpha; +}; + +enum vsp1_pipeline_state { + VSP1_PIPELINE_STOPPED, + VSP1_PIPELINE_RUNNING, + VSP1_PIPELINE_STOPPING, +}; + +/* + * struct vsp1_pipeline - A VSP1 hardware pipeline + * @pipe: the media pipeline + * @irqlock: protects the pipeline state + * @state: current state + * @wq: work queue to wait for state change completion + * @frame_end: frame end interrupt handler + * @lock: protects the pipeline use count and stream count + * @use_count: number of video nodes using the pipeline + * @stream_count: number of streaming video nodes + * @buffers_ready: bitmask of RPFs and WPFs with at least one buffer available + * @num_inputs: number of RPFs + * @inputs: array of RPFs in the pipeline (indexed by RPF index) + * @output: WPF at the output of the pipeline + * @bru: BRU entity, if present + * @lif: LIF entity, if present + * @uds: UDS entity, if present + * @uds_input: entity at the input of the UDS, if the UDS is present + * @entities: list of entities in the pipeline + * @dl: display list associated with the pipeline + */ +struct vsp1_pipeline { + struct media_pipeline pipe; + + spinlock_t irqlock; + enum vsp1_pipeline_state state; + wait_queue_head_t wq; + + void (*frame_end)(struct vsp1_pipeline *pipe); + + struct mutex lock; + unsigned int use_count; + unsigned int stream_count; + unsigned int buffers_ready; + + unsigned int num_inputs; + struct vsp1_rwpf *inputs[VSP1_MAX_RPF]; + struct vsp1_rwpf *output; + struct vsp1_entity *bru; + struct vsp1_entity *lif; + struct vsp1_entity *uds; + struct vsp1_entity *uds_input; + + struct list_head entities; + + struct vsp1_dl *dl; +}; + +static inline struct vsp1_pipeline 
*to_vsp1_pipeline(struct media_entity *e) +{ + if (likely(e->pipe)) + return container_of(e->pipe, struct vsp1_pipeline, pipe); + else + return NULL; +} + +void vsp1_pipeline_reset(struct vsp1_pipeline *pipe); +void vsp1_pipeline_init(struct vsp1_pipeline *pipe); + +void vsp1_pipeline_run(struct vsp1_pipeline *pipe); +bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe); +int vsp1_pipeline_stop(struct vsp1_pipeline *pipe); +bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe); + +void vsp1_pipeline_display_start(struct vsp1_pipeline *pipe); +void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe); + +void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe, + struct vsp1_entity *input, + unsigned int alpha); + +void vsp1_pipelines_suspend(struct vsp1_device *vsp1); +void vsp1_pipelines_resume(struct vsp1_device *vsp1); + +const struct vsp1_format_info *vsp1_get_format_info(u32 fourcc); + +#endif /* __VSP1_PIPE_H__ */ diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h index 25b48738b147..069216f0eb44 100644 --- a/drivers/media/platform/vsp1/vsp1_regs.h +++ b/drivers/media/platform/vsp1/vsp1_regs.h @@ -46,7 +46,7 @@ #define VI6_DISP_IRQ_ENB_LNEE(n) (1 << (n)) #define VI6_DISP_IRQ_STA 0x007c -#define VI6_DISP_IRQ_STA_DSE (1 << 8) +#define VI6_DISP_IRQ_STA_DST (1 << 8) #define VI6_DISP_IRQ_STA_MAE (1 << 5) #define VI6_DISP_IRQ_STA_LNE(n) (1 << (n)) @@ -322,7 +322,7 @@ #define VI6_DPR_NODE_SRU 16 #define VI6_DPR_NODE_UDS(n) (17 + (n)) #define VI6_DPR_NODE_LUT 22 -#define VI6_DPR_NODE_BRU_IN(n) (23 + (n)) +#define VI6_DPR_NODE_BRU_IN(n) (((n) <= 3) ? 23 + (n) : 49) #define VI6_DPR_NODE_BRU_OUT 27 #define VI6_DPR_NODE_CLU 29 #define VI6_DPR_NODE_HST 30 @@ -504,12 +504,12 @@ #define VI6_BRU_VIRRPF_COL_BCB_MASK (0xff << 0) #define VI6_BRU_VIRRPF_COL_BCB_SHIFT 0 -#define VI6_BRU_CTRL(n) (0x2c10 + (n) * 8) +#define VI6_BRU_CTRL(n) (0x2c10 + (n) * 8 + ((n) <= 3 ? 0 : 4)) #define VI6_BRU_CTRL_RBC (1 << 31) -#define VI6_BRU_CTRL_DSTSEL_BRUIN(n) ((n) << 20) +#define VI6_BRU_CTRL_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20) #define VI6_BRU_CTRL_DSTSEL_VRPF (4 << 20) #define VI6_BRU_CTRL_DSTSEL_MASK (7 << 20) -#define VI6_BRU_CTRL_SRCSEL_BRUIN(n) ((n) << 16) +#define VI6_BRU_CTRL_SRCSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 16) #define VI6_BRU_CTRL_SRCSEL_VRPF (4 << 16) #define VI6_BRU_CTRL_SRCSEL_MASK (7 << 16) #define VI6_BRU_CTRL_CROP(rop) ((rop) << 4) @@ -517,7 +517,7 @@ #define VI6_BRU_CTRL_AROP(rop) ((rop) << 0) #define VI6_BRU_CTRL_AROP_MASK (0xf << 0) -#define VI6_BRU_BLD(n) (0x2c14 + (n) * 8) +#define VI6_BRU_BLD(n) (0x2c14 + (n) * 8 + ((n) <= 3 ? 0 : 4)) #define VI6_BRU_BLD_CBES (1 << 31) #define VI6_BRU_BLD_CCMDX_DST_A (0 << 28) #define VI6_BRU_BLD_CCMDX_255_DST_A (1 << 28) @@ -551,7 +551,7 @@ #define VI6_BRU_BLD_COEFY_SHIFT 0 #define VI6_BRU_ROP 0x2c30 -#define VI6_BRU_ROP_DSTSEL_BRUIN(n) ((n) << 20) +#define VI6_BRU_ROP_DSTSEL_BRUIN(n) (((n) <= 3 ? 
(n) : (n)+1) << 20) #define VI6_BRU_ROP_DSTSEL_VRPF (4 << 20) #define VI6_BRU_ROP_DSTSEL_MASK (7 << 20) #define VI6_BRU_ROP_CROP(rop) ((rop) << 4) @@ -625,6 +625,24 @@ #define VI6_SECURITY_CTRL1 0x3d04 /* ----------------------------------------------------------------------------- + * IP Version Registers + */ + +#define VI6_IP_VERSION 0x3f00 +#define VI6_IP_VERSION_MODEL_MASK (0xff << 8) +#define VI6_IP_VERSION_MODEL_VSPS_H2 (0x09 << 8) +#define VI6_IP_VERSION_MODEL_VSPR_H2 (0x0a << 8) +#define VI6_IP_VERSION_MODEL_VSPD_GEN2 (0x0b << 8) +#define VI6_IP_VERSION_MODEL_VSPS_M2 (0x0c << 8) +#define VI6_IP_VERSION_MODEL_VSPI_GEN3 (0x14 << 8) +#define VI6_IP_VERSION_MODEL_VSPBD_GEN3 (0x15 << 8) +#define VI6_IP_VERSION_MODEL_VSPBC_GEN3 (0x16 << 8) +#define VI6_IP_VERSION_MODEL_VSPD_GEN3 (0x17 << 8) +#define VI6_IP_VERSION_SOC_MASK (0xff << 0) +#define VI6_IP_VERSION_SOC_H (0x01 << 0) +#define VI6_IP_VERSION_SOC_M (0x02 << 0) + +/* ----------------------------------------------------------------------------- * RPF CLUT Registers */ diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c index 924538223d3e..5bc1d1574a43 100644 --- a/drivers/media/platform/vsp1/vsp1_rpf.c +++ b/drivers/media/platform/vsp1/vsp1_rpf.c @@ -26,16 +26,10 @@ * Device Access */ -static inline u32 vsp1_rpf_read(struct vsp1_rwpf *rpf, u32 reg) -{ - return vsp1_read(rpf->entity.vsp1, - reg + rpf->entity.index * VI6_RPF_OFFSET); -} - static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf, u32 reg, u32 data) { - vsp1_write(rpf->entity.vsp1, - reg + rpf->entity.index * VI6_RPF_OFFSET, data); + vsp1_mod_write(&rpf->entity, reg + rpf->entity.index * VI6_RPF_OFFSET, + data); } /* ----------------------------------------------------------------------------- @@ -74,9 +68,11 @@ static const struct v4l2_ctrl_ops rpf_ctrl_ops = { static int rpf_s_stream(struct v4l2_subdev *subdev, int enable) { + struct vsp1_pipeline *pipe = to_vsp1_pipeline(&subdev->entity); struct vsp1_rwpf *rpf = to_rwpf(subdev); - const struct vsp1_format_info *fmtinfo = rpf->video.fmtinfo; - const struct v4l2_pix_format_mplane *format = &rpf->video.format; + struct vsp1_device *vsp1 = rpf->entity.vsp1; + const struct vsp1_format_info *fmtinfo = rpf->fmtinfo; + const struct v4l2_pix_format_mplane *format = &rpf->format; const struct v4l2_rect *crop = &rpf->crop; u32 pstride; u32 infmt; @@ -154,6 +150,15 @@ static int rpf_s_stream(struct v4l2_subdev *subdev, int enable) vsp1_rpf_write(rpf, VI6_RPF_ALPH_SEL, VI6_RPF_ALPH_SEL_AEXT_EXT | (fmtinfo->alpha ? 
VI6_RPF_ALPH_SEL_ASEL_PACKED : VI6_RPF_ALPH_SEL_ASEL_FIXED)); + + if (vsp1->info->uapi) + mutex_lock(rpf->ctrls.lock); + vsp1_rpf_write(rpf, VI6_RPF_VRTCOL_SET, + rpf->alpha->cur.val << VI6_RPF_VRTCOL_SET_LAYA_SHIFT); + vsp1_pipeline_propagate_alpha(pipe, &rpf->entity, rpf->alpha->cur.val); + if (vsp1->info->uapi) + mutex_unlock(rpf->ctrls.lock); + vsp1_rpf_write(rpf, VI6_RPF_MSK_CTRL, 0); vsp1_rpf_write(rpf, VI6_RPF_CKEY_CTRL, 0); @@ -186,30 +191,28 @@ static struct v4l2_subdev_ops rpf_ops = { * Video Device Operations */ -static void rpf_vdev_queue(struct vsp1_video *video, - struct vsp1_video_buffer *buf) +static void rpf_set_memory(struct vsp1_rwpf *rpf, struct vsp1_rwpf_memory *mem) { - struct vsp1_rwpf *rpf = container_of(video, struct vsp1_rwpf, video); unsigned int i; for (i = 0; i < 3; ++i) - rpf->buf_addr[i] = buf->addr[i]; + rpf->buf_addr[i] = mem->addr[i]; if (!vsp1_entity_is_streaming(&rpf->entity)) return; vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y, - buf->addr[0] + rpf->offsets[0]); - if (buf->buf.vb2_buf.num_planes > 1) + mem->addr[0] + rpf->offsets[0]); + if (mem->num_planes > 1) vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0, - buf->addr[1] + rpf->offsets[1]); - if (buf->buf.vb2_buf.num_planes > 2) + mem->addr[1] + rpf->offsets[1]); + if (mem->num_planes > 2) vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1, - buf->addr[2] + rpf->offsets[1]); + mem->addr[2] + rpf->offsets[1]); } -static const struct vsp1_video_operations rpf_vdev_ops = { - .queue = rpf_vdev_queue, +static const struct vsp1_rwpf_operations rpf_vdev_ops = { + .set_memory = rpf_set_memory, }; /* ----------------------------------------------------------------------------- @@ -219,7 +222,6 @@ static const struct vsp1_video_operations rpf_vdev_ops = { struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index) { struct v4l2_subdev *subdev; - struct vsp1_video *video; struct vsp1_rwpf *rpf; int ret; @@ -227,6 +229,8 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index) if (rpf == NULL) return ERR_PTR(-ENOMEM); + rpf->ops = &rpf_vdev_ops; + rpf->max_width = RPF_MAX_WIDTH; rpf->max_height = RPF_MAX_HEIGHT; @@ -241,7 +245,7 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index) subdev = &rpf->entity.subdev; v4l2_subdev_init(subdev, &rpf_ops); - subdev->entity.ops = &vsp1_media_ops; + subdev->entity.ops = &vsp1->media_ops; subdev->internal_ops = &vsp1_subdev_internal_ops; snprintf(subdev->name, sizeof(subdev->name), "%s rpf.%u", dev_name(vsp1->dev), index); @@ -252,8 +256,9 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index) /* Initialize the control handler. */ v4l2_ctrl_handler_init(&rpf->ctrls, 1); - v4l2_ctrl_new_std(&rpf->ctrls, &rpf_ctrl_ops, V4L2_CID_ALPHA_COMPONENT, - 0, 255, 1, 255); + rpf->alpha = v4l2_ctrl_new_std(&rpf->ctrls, &rpf_ctrl_ops, + V4L2_CID_ALPHA_COMPONENT, + 0, 255, 1, 255); rpf->entity.subdev.ctrl_handler = &rpf->ctrls; @@ -264,42 +269,9 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index) goto error; } - /* Initialize the video device. 
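The new rpf_set_memory() hook above programs one DMA address per plane, skips planes the buffer does not have, and adds a per-plane offset, with the two chroma planes sharing the second offset. A simplified stand-alone version of that shape is shown below; write_plane_addr() and the numeric values are placeholders, the real writes go to the VI6_RPF_SRCM_ADDR_* registers.

#include <stdint.h>
#include <stdio.h>

struct mem_desc {
        unsigned int num_planes;
        uint64_t addr[3];
};

/* Stand-in for a register write: here we just print what would be programmed. */
static void write_plane_addr(unsigned int plane, uint64_t addr)
{
        printf("plane %u -> 0x%llx\n", plane, (unsigned long long)addr);
}

/* Program only the planes the buffer actually has, adding a per-plane
 * offset (e.g. from cropping) - same shape as the new set_memory hooks. */
static void set_memory(const struct mem_desc *mem, const uint64_t offsets[3])
{
        write_plane_addr(0, mem->addr[0] + offsets[0]);
        if (mem->num_planes > 1)
                write_plane_addr(1, mem->addr[1] + offsets[1]);
        if (mem->num_planes > 2)
                write_plane_addr(2, mem->addr[2] + offsets[1]); /* chroma planes share one offset */
}

int main(void)
{
        const struct mem_desc nv12 = { 2, { 0x10000000, 0x10100000, 0 } };
        const uint64_t offsets[3] = { 64, 32, 0 };

        set_memory(&nv12, offsets);
        return 0;
}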
*/ - video = &rpf->video; - - video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; - video->vsp1 = vsp1; - video->ops = &rpf_vdev_ops; - - ret = vsp1_video_init(video, &rpf->entity); - if (ret < 0) - goto error; - - rpf->entity.video = video; - return rpf; error: vsp1_entity_destroy(&rpf->entity); return ERR_PTR(ret); } - -/* - * vsp1_rpf_create_links() - RPF pads links creation - * @vsp1: Pointer to VSP1 device - * @entity: Pointer to VSP1 entity - * - * return negative error code or zero on success - */ -int vsp1_rpf_create_links(struct vsp1_device *vsp1, - struct vsp1_entity *entity) -{ - struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev); - - /* Connect the video device to the RPF. */ - return media_create_pad_link(&rpf->video.video.entity, 0, - &rpf->entity.subdev.entity, - RWPF_PAD_SINK, - MEDIA_LNK_FL_ENABLED | - MEDIA_LNK_FL_IMMUTABLE); -} diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h index 731d36e5258d..8e8235682ada 100644 --- a/drivers/media/platform/vsp1/vsp1_rwpf.h +++ b/drivers/media/platform/vsp1/vsp1_rwpf.h @@ -19,19 +19,39 @@ #include "vsp1.h" #include "vsp1_entity.h" -#include "vsp1_video.h" #define RWPF_PAD_SINK 0 #define RWPF_PAD_SOURCE 1 +struct v4l2_ctrl; +struct vsp1_rwpf; +struct vsp1_video; + +struct vsp1_rwpf_memory { + unsigned int num_planes; + dma_addr_t addr[3]; + unsigned int length[3]; +}; + +struct vsp1_rwpf_operations { + void (*set_memory)(struct vsp1_rwpf *rwpf, + struct vsp1_rwpf_memory *mem); +}; + struct vsp1_rwpf { struct vsp1_entity entity; - struct vsp1_video video; struct v4l2_ctrl_handler ctrls; + struct v4l2_ctrl *alpha; + + struct vsp1_video *video; + + const struct vsp1_rwpf_operations *ops; unsigned int max_width; unsigned int max_height; + struct v4l2_pix_format_mplane format; + const struct vsp1_format_info *fmtinfo; struct { unsigned int left; unsigned int top; @@ -50,11 +70,6 @@ static inline struct vsp1_rwpf *to_rwpf(struct v4l2_subdev *subdev) struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index); struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index); -int vsp1_rpf_create_links(struct vsp1_device *vsp1, - struct vsp1_entity *entity); -int vsp1_wpf_create_links(struct vsp1_device *vsp1, - struct vsp1_entity *entity); - int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_mbus_code_enum *code); diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c index 6310acab60e7..cc09efbfb24f 100644 --- a/drivers/media/platform/vsp1/vsp1_sru.c +++ b/drivers/media/platform/vsp1/vsp1_sru.c @@ -151,10 +151,13 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable) /* Take the control handler lock to ensure that the CTRL0 value won't be * changed behind our back by a set control operation. 
*/ - mutex_lock(sru->ctrls.lock); + if (sru->entity.vsp1->info->uapi) + mutex_lock(sru->ctrls.lock); ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0) & (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK); - mutex_unlock(sru->ctrls.lock); + vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0); + if (sru->entity.vsp1->info->uapi) + mutex_unlock(sru->ctrls.lock); vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5); @@ -360,7 +363,7 @@ struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1) subdev = &sru->entity.subdev; v4l2_subdev_init(subdev, &sru_ops); - subdev->entity.ops = &vsp1_media_ops; + subdev->entity.ops = &vsp1->media_ops; subdev->internal_ops = &vsp1_subdev_internal_ops; snprintf(subdev->name, sizeof(subdev->name), "%s sru", dev_name(vsp1->dev)); diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c index ccc8243e3493..bba67770cf95 100644 --- a/drivers/media/platform/vsp1/vsp1_uds.c +++ b/drivers/media/platform/vsp1/vsp1_uds.c @@ -29,12 +29,6 @@ * Device Access */ -static inline u32 vsp1_uds_read(struct vsp1_uds *uds, u32 reg) -{ - return vsp1_read(uds->entity.vsp1, - reg + uds->entity.index * VI6_UDS_OFFSET); -} - static inline void vsp1_uds_write(struct vsp1_uds *uds, u32 reg, u32 data) { vsp1_write(uds->entity.vsp1, @@ -344,7 +338,7 @@ struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index) subdev = &uds->entity.subdev; v4l2_subdev_init(subdev, &uds_ops); - subdev->entity.ops = &vsp1_media_ops; + subdev->entity.ops = &vsp1->media_ops; subdev->internal_ops = &vsp1_subdev_internal_ops; snprintf(subdev->name, sizeof(subdev->name), "%s uds.%u", dev_name(vsp1->dev), index); diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c index b4dca57d1ae3..61ee0f92c1e5 100644 --- a/drivers/media/platform/vsp1/vsp1_video.c +++ b/drivers/media/platform/vsp1/vsp1_video.c @@ -14,10 +14,10 @@ #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> -#include <linux/sched.h> #include <linux/slab.h> #include <linux/v4l2-mediabus.h> #include <linux/videodev2.h> +#include <linux/wait.h> #include <media/media-entity.h> #include <media/v4l2-dev.h> @@ -30,6 +30,7 @@ #include "vsp1.h" #include "vsp1_bru.h" #include "vsp1_entity.h" +#include "vsp1_pipe.h" #include "vsp1_rwpf.h" #include "vsp1_uds.h" #include "vsp1_video.h" @@ -47,113 +48,6 @@ * Helper functions */ -static const struct vsp1_format_info vsp1_video_formats[] = { - { V4L2_PIX_FMT_RGB332, MEDIA_BUS_FMT_ARGB8888_1X32, - VI6_FMT_RGB_332, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 1, { 8, 0, 0 }, false, false, 1, 1, false }, - { V4L2_PIX_FMT_ARGB444, MEDIA_BUS_FMT_ARGB8888_1X32, - VI6_FMT_ARGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS, - 1, { 16, 0, 0 }, false, false, 1, 1, true }, - { V4L2_PIX_FMT_XRGB444, MEDIA_BUS_FMT_ARGB8888_1X32, - VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS, - 1, { 16, 0, 0 }, false, false, 1, 1, true }, - { V4L2_PIX_FMT_ARGB555, MEDIA_BUS_FMT_ARGB8888_1X32, - VI6_FMT_ARGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS, - 1, { 16, 0, 0 }, false, false, 1, 1, true }, - { V4L2_PIX_FMT_XRGB555, MEDIA_BUS_FMT_ARGB8888_1X32, - VI6_FMT_XRGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS, - 1, { 16, 0, 0 }, false, false, 1, 1, false }, - { V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_ARGB8888_1X32, - VI6_FMT_RGB_565, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS 
| - VI6_RPF_DSWAP_P_WDS, - 1, { 16, 0, 0 }, false, false, 1, 1, false }, - { V4L2_PIX_FMT_BGR24, MEDIA_BUS_FMT_ARGB8888_1X32, - VI6_FMT_BGR_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 1, { 24, 0, 0 }, false, false, 1, 1, false }, - { V4L2_PIX_FMT_RGB24, MEDIA_BUS_FMT_ARGB8888_1X32, - VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 1, { 24, 0, 0 }, false, false, 1, 1, false }, - { V4L2_PIX_FMT_ABGR32, MEDIA_BUS_FMT_ARGB8888_1X32, - VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS, - 1, { 32, 0, 0 }, false, false, 1, 1, true }, - { V4L2_PIX_FMT_XBGR32, MEDIA_BUS_FMT_ARGB8888_1X32, - VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS, - 1, { 32, 0, 0 }, false, false, 1, 1, false }, - { V4L2_PIX_FMT_ARGB32, MEDIA_BUS_FMT_ARGB8888_1X32, - VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 1, { 32, 0, 0 }, false, false, 1, 1, true }, - { V4L2_PIX_FMT_XRGB32, MEDIA_BUS_FMT_ARGB8888_1X32, - VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 1, { 32, 0, 0 }, false, false, 1, 1, false }, - { V4L2_PIX_FMT_UYVY, MEDIA_BUS_FMT_AYUV8_1X32, - VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 1, { 16, 0, 0 }, false, false, 2, 1, false }, - { V4L2_PIX_FMT_VYUY, MEDIA_BUS_FMT_AYUV8_1X32, - VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 1, { 16, 0, 0 }, false, true, 2, 1, false }, - { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_AYUV8_1X32, - VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 1, { 16, 0, 0 }, true, false, 2, 1, false }, - { V4L2_PIX_FMT_YVYU, MEDIA_BUS_FMT_AYUV8_1X32, - VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 1, { 16, 0, 0 }, true, true, 2, 1, false }, - { V4L2_PIX_FMT_NV12M, MEDIA_BUS_FMT_AYUV8_1X32, - VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 2, { 8, 16, 0 }, false, false, 2, 2, false }, - { V4L2_PIX_FMT_NV21M, MEDIA_BUS_FMT_AYUV8_1X32, - VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 2, { 8, 16, 0 }, false, true, 2, 2, false }, - { V4L2_PIX_FMT_NV16M, MEDIA_BUS_FMT_AYUV8_1X32, - VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 2, { 8, 16, 0 }, false, false, 2, 1, false }, - { V4L2_PIX_FMT_NV61M, MEDIA_BUS_FMT_AYUV8_1X32, - VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 2, { 8, 16, 0 }, false, true, 2, 1, false }, - { V4L2_PIX_FMT_YUV420M, MEDIA_BUS_FMT_AYUV8_1X32, - VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | - VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, - 3, { 8, 8, 8 }, false, false, 2, 2, false }, -}; - -/* - * vsp1_get_format_info - Retrieve format information for a 4CC - * @fourcc: the format 4CC - * - * Return a pointer to the format information structure corresponding to the - * given V4L2 format 4CC, or NULL if no corresponding format can be found. 
- */ -static const struct vsp1_format_info *vsp1_get_format_info(u32 fourcc) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) { - const struct vsp1_format_info *info = &vsp1_video_formats[i]; - - if (info->fourcc == fourcc) - return info; - } - - return NULL; -} - - static struct v4l2_subdev * vsp1_video_remote_subdev(struct media_pad *local, u32 *pad) { @@ -184,9 +78,9 @@ static int vsp1_video_verify_format(struct vsp1_video *video) if (ret < 0) return ret == -ENOIOCTLCMD ? -EINVAL : ret; - if (video->fmtinfo->mbus != fmt.format.code || - video->format.height != fmt.format.height || - video->format.width != fmt.format.width) + if (video->rwpf->fmtinfo->mbus != fmt.format.code || + video->rwpf->format.height != fmt.format.height || + video->rwpf->format.width != fmt.format.width) return -EINVAL; return 0; @@ -277,9 +171,9 @@ static int __vsp1_video_try_format(struct vsp1_video *video, * Pipeline Management */ -static int vsp1_pipeline_validate_branch(struct vsp1_pipeline *pipe, - struct vsp1_rwpf *input, - struct vsp1_rwpf *output) +static int vsp1_video_pipeline_validate_branch(struct vsp1_pipeline *pipe, + struct vsp1_rwpf *input, + struct vsp1_rwpf *output) { struct vsp1_entity *entity; struct media_entity_enum ent_enum; @@ -370,29 +264,8 @@ out: return rval; } -static void __vsp1_pipeline_cleanup(struct vsp1_pipeline *pipe) -{ - if (pipe->bru) { - struct vsp1_bru *bru = to_bru(&pipe->bru->subdev); - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(bru->inputs); ++i) - bru->inputs[i].rpf = NULL; - } - - INIT_LIST_HEAD(&pipe->entities); - pipe->state = VSP1_PIPELINE_STOPPED; - pipe->buffers_ready = 0; - pipe->num_video = 0; - pipe->num_inputs = 0; - pipe->output = NULL; - pipe->bru = NULL; - pipe->lif = NULL; - pipe->uds = NULL; -} - -static int vsp1_pipeline_validate(struct vsp1_pipeline *pipe, - struct vsp1_video *video) +static int vsp1_video_pipeline_validate(struct vsp1_pipeline *pipe, + struct vsp1_video *video) { struct media_entity_graph graph; struct media_entity *entity = &video->video.entity; @@ -416,10 +289,8 @@ static int vsp1_pipeline_validate(struct vsp1_pipeline *pipe, struct vsp1_rwpf *rwpf; struct vsp1_entity *e; - if (is_media_entity_v4l2_io(entity)) { - pipe->num_video++; + if (is_media_entity_v4l2_io(entity)) continue; - } subdev = media_entity_to_v4l2_subdev(entity); e = to_vsp1_entity(subdev); @@ -427,12 +298,12 @@ static int vsp1_pipeline_validate(struct vsp1_pipeline *pipe, if (e->type == VSP1_ENTITY_RPF) { rwpf = to_rwpf(subdev); - pipe->inputs[pipe->num_inputs++] = rwpf; - rwpf->video.pipe_index = pipe->num_inputs; + pipe->inputs[rwpf->entity.index] = rwpf; + rwpf->video->pipe_index = ++pipe->num_inputs; } else if (e->type == VSP1_ENTITY_WPF) { rwpf = to_rwpf(subdev); - pipe->output = to_rwpf(subdev); - rwpf->video.pipe_index = 0; + pipe->output = rwpf; + rwpf->video->pipe_index = 0; } else if (e->type == VSP1_ENTITY_LIF) { pipe->lif = e; } else if (e->type == VSP1_ENTITY_BRU) { @@ -453,9 +324,12 @@ static int vsp1_pipeline_validate(struct vsp1_pipeline *pipe, /* Follow links downstream for each input and make sure the graph * contains no loop and that all branches end at the output WPF. 
*/ - for (i = 0; i < pipe->num_inputs; ++i) { - ret = vsp1_pipeline_validate_branch(pipe, pipe->inputs[i], - pipe->output); + for (i = 0; i < video->vsp1->info->rpf_count; ++i) { + if (!pipe->inputs[i]) + continue; + + ret = vsp1_video_pipeline_validate_branch(pipe, pipe->inputs[i], + pipe->output); if (ret < 0) goto error; } @@ -463,12 +337,12 @@ static int vsp1_pipeline_validate(struct vsp1_pipeline *pipe, return 0; error: - __vsp1_pipeline_cleanup(pipe); + vsp1_pipeline_reset(pipe); return ret; } -static int vsp1_pipeline_init(struct vsp1_pipeline *pipe, - struct vsp1_video *video) +static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe, + struct vsp1_video *video) { int ret; @@ -476,7 +350,7 @@ static int vsp1_pipeline_init(struct vsp1_pipeline *pipe, /* If we're the first user validate and initialize the pipeline. */ if (pipe->use_count == 0) { - ret = vsp1_pipeline_validate(pipe, video); + ret = vsp1_video_pipeline_validate(pipe, video); if (ret < 0) goto done; } @@ -489,75 +363,17 @@ done: return ret; } -static void vsp1_pipeline_cleanup(struct vsp1_pipeline *pipe) +static void vsp1_video_pipeline_cleanup(struct vsp1_pipeline *pipe) { mutex_lock(&pipe->lock); /* If we're the last user clean up the pipeline. */ if (--pipe->use_count == 0) - __vsp1_pipeline_cleanup(pipe); + vsp1_pipeline_reset(pipe); mutex_unlock(&pipe->lock); } -static void vsp1_pipeline_run(struct vsp1_pipeline *pipe) -{ - struct vsp1_device *vsp1 = pipe->output->entity.vsp1; - - vsp1_write(vsp1, VI6_CMD(pipe->output->entity.index), VI6_CMD_STRCMD); - pipe->state = VSP1_PIPELINE_RUNNING; - pipe->buffers_ready = 0; -} - -static bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe) -{ - unsigned long flags; - bool stopped; - - spin_lock_irqsave(&pipe->irqlock, flags); - stopped = pipe->state == VSP1_PIPELINE_STOPPED; - spin_unlock_irqrestore(&pipe->irqlock, flags); - - return stopped; -} - -static int vsp1_pipeline_stop(struct vsp1_pipeline *pipe) -{ - struct vsp1_entity *entity; - unsigned long flags; - int ret; - - spin_lock_irqsave(&pipe->irqlock, flags); - if (pipe->state == VSP1_PIPELINE_RUNNING) - pipe->state = VSP1_PIPELINE_STOPPING; - spin_unlock_irqrestore(&pipe->irqlock, flags); - - ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe), - msecs_to_jiffies(500)); - ret = ret == 0 ? -ETIMEDOUT : 0; - - list_for_each_entry(entity, &pipe->entities, list_pipe) { - if (entity->route && entity->route->reg) - vsp1_write(entity->vsp1, entity->route->reg, - VI6_DPR_NODE_UNUSED); - - v4l2_subdev_call(&entity->subdev, video, s_stream, 0); - } - - return ret; -} - -static bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe) -{ - unsigned int mask; - - mask = ((1 << pipe->num_inputs) - 1) << 1; - if (!pipe->lif) - mask |= 1 << 0; - - return pipe->buffers_ready == mask; -} - /* * vsp1_video_complete_buffer - Complete the current buffer * @video: the video node @@ -572,12 +388,12 @@ static bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe) * * Return the next queued buffer or NULL if the queue is empty. 
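The buffer-completion behaviour described here can be illustrated with a toy queue: the head buffer is completed and the next queued buffer is returned, except in display (LIF) mode where a lone remaining buffer is reused rather than completed, so the display always has a frame to scan out. This is a simplified array-based sketch, not the driver's list_head-based irqqueue.

#include <stdbool.h>
#include <stdio.h>

#define QLEN 4

struct queue {
        int buf[QLEN];
        unsigned int count;
};

/* Pop the head buffer and report the next one to program, but in display
 * (LIF) mode keep reusing the last remaining buffer instead of completing
 * it. Returns the next buffer to use, or -1 if the queue is empty. */
static int complete_buffer(struct queue *q, bool display_mode, int *completed)
{
        unsigned int i;

        *completed = -1;
        if (q->count == 0)
                return -1;

        if (display_mode && q->count == 1)
                return q->buf[0];       /* reuse, nothing completed */

        *completed = q->buf[0];
        for (i = 1; i < q->count; ++i)
                q->buf[i - 1] = q->buf[i];
        q->count--;

        return q->count ? q->buf[0] : -1;
}

int main(void)
{
        struct queue q = { { 7, 8 }, 2 };
        int done;
        int next = complete_buffer(&q, true, &done);

        printf("completed %d, next %d\n", done, next);  /* completed 7, next 8 */
        next = complete_buffer(&q, true, &done);
        printf("completed %d, next %d\n", done, next);  /* completed -1, next 8 (reused) */
        return 0;
}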
*/ -static struct vsp1_video_buffer * +static struct vsp1_vb2_buffer * vsp1_video_complete_buffer(struct vsp1_video *video) { struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity); - struct vsp1_video_buffer *next = NULL; - struct vsp1_video_buffer *done; + struct vsp1_vb2_buffer *next = NULL; + struct vsp1_vb2_buffer *done; unsigned long flags; unsigned int i; @@ -589,7 +405,7 @@ vsp1_video_complete_buffer(struct vsp1_video *video) } done = list_first_entry(&video->irqqueue, - struct vsp1_video_buffer, queue); + struct vsp1_vb2_buffer, queue); /* In DU output mode reuse the buffer if the list is singular. */ if (pipe->lif && list_is_singular(&video->irqqueue)) { @@ -601,23 +417,25 @@ vsp1_video_complete_buffer(struct vsp1_video *video) if (!list_empty(&video->irqqueue)) next = list_first_entry(&video->irqqueue, - struct vsp1_video_buffer, queue); + struct vsp1_vb2_buffer, queue); spin_unlock_irqrestore(&video->irqlock, flags); done->buf.sequence = video->sequence++; done->buf.vb2_buf.timestamp = ktime_get_ns(); for (i = 0; i < done->buf.vb2_buf.num_planes; ++i) - vb2_set_plane_payload(&done->buf.vb2_buf, i, done->length[i]); + vb2_set_plane_payload(&done->buf.vb2_buf, i, + done->mem.length[i]); vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE); return next; } static void vsp1_video_frame_end(struct vsp1_pipeline *pipe, - struct vsp1_video *video) + struct vsp1_rwpf *rwpf) { - struct vsp1_video_buffer *buf; + struct vsp1_video *video = rwpf->video; + struct vsp1_vb2_buffer *buf; unsigned long flags; buf = vsp1_video_complete_buffer(video); @@ -626,155 +444,27 @@ static void vsp1_video_frame_end(struct vsp1_pipeline *pipe, spin_lock_irqsave(&pipe->irqlock, flags); - video->ops->queue(video, buf); + video->rwpf->ops->set_memory(video->rwpf, &buf->mem); pipe->buffers_ready |= 1 << video->pipe_index; spin_unlock_irqrestore(&pipe->irqlock, flags); } -void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe) +static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe) { - enum vsp1_pipeline_state state; - unsigned long flags; + struct vsp1_device *vsp1 = pipe->output->entity.vsp1; unsigned int i; - if (pipe == NULL) - return; - /* Complete buffers on all video nodes. */ - for (i = 0; i < pipe->num_inputs; ++i) - vsp1_video_frame_end(pipe, &pipe->inputs[i]->video); - - if (!pipe->lif) - vsp1_video_frame_end(pipe, &pipe->output->video); - - spin_lock_irqsave(&pipe->irqlock, flags); - - state = pipe->state; - pipe->state = VSP1_PIPELINE_STOPPED; - - /* If a stop has been requested, mark the pipeline as stopped and - * return. - */ - if (state == VSP1_PIPELINE_STOPPING) { - wake_up(&pipe->wq); - goto done; - } - - /* Restart the pipeline if ready. */ - if (vsp1_pipeline_ready(pipe)) - vsp1_pipeline_run(pipe); - -done: - spin_unlock_irqrestore(&pipe->irqlock, flags); -} - -/* - * Propagate the alpha value through the pipeline. - * - * As the UDS has restricted scaling capabilities when the alpha component needs - * to be scaled, we disable alpha scaling when the UDS input has a fixed alpha - * value. The UDS then outputs a fixed alpha value which needs to be programmed - * from the input RPF alpha. 
- */ -void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe, - struct vsp1_entity *input, - unsigned int alpha) -{ - struct vsp1_entity *entity; - struct media_pad *pad; - - pad = media_entity_remote_pad(&input->pads[RWPF_PAD_SOURCE]); - - while (pad) { - if (!is_media_entity_v4l2_subdev(pad->entity)) - break; - - entity = to_vsp1_entity(media_entity_to_v4l2_subdev(pad->entity)); - - /* The BRU background color has a fixed alpha value set to 255, - * the output alpha value is thus always equal to 255. - */ - if (entity->type == VSP1_ENTITY_BRU) - alpha = 255; - - if (entity->type == VSP1_ENTITY_UDS) { - struct vsp1_uds *uds = to_uds(&entity->subdev); - - vsp1_uds_set_alpha(uds, alpha); - break; - } - - pad = &entity->pads[entity->source_pad]; - pad = media_entity_remote_pad(pad); - } -} - -void vsp1_pipelines_suspend(struct vsp1_device *vsp1) -{ - unsigned long flags; - unsigned int i; - int ret; - - /* To avoid increasing the system suspend time needlessly, loop over the - * pipelines twice, first to set them all to the stopping state, and then - * to wait for the stop to complete. - */ - for (i = 0; i < vsp1->pdata.wpf_count; ++i) { - struct vsp1_rwpf *wpf = vsp1->wpf[i]; - struct vsp1_pipeline *pipe; - - if (wpf == NULL) - continue; - - pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity); - if (pipe == NULL) - continue; - - spin_lock_irqsave(&pipe->irqlock, flags); - if (pipe->state == VSP1_PIPELINE_RUNNING) - pipe->state = VSP1_PIPELINE_STOPPING; - spin_unlock_irqrestore(&pipe->irqlock, flags); - } - - for (i = 0; i < vsp1->pdata.wpf_count; ++i) { - struct vsp1_rwpf *wpf = vsp1->wpf[i]; - struct vsp1_pipeline *pipe; - - if (wpf == NULL) - continue; - - pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity); - if (pipe == NULL) + for (i = 0; i < vsp1->info->rpf_count; ++i) { + if (!pipe->inputs[i]) continue; - ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe), - msecs_to_jiffies(500)); - if (ret == 0) - dev_warn(vsp1->dev, "pipeline %u stop timeout\n", - wpf->entity.index); + vsp1_video_frame_end(pipe, pipe->inputs[i]); } -} - -void vsp1_pipelines_resume(struct vsp1_device *vsp1) -{ - unsigned int i; - - /* Resume pipeline all running pipelines. 
*/ - for (i = 0; i < vsp1->pdata.wpf_count; ++i) { - struct vsp1_rwpf *wpf = vsp1->wpf[i]; - struct vsp1_pipeline *pipe; - if (wpf == NULL) - continue; - - pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity); - if (pipe == NULL) - continue; - - if (vsp1_pipeline_ready(pipe)) - vsp1_pipeline_run(pipe); - } + if (!pipe->lif) + vsp1_video_frame_end(pipe, pipe->output); } /* ----------------------------------------------------------------------------- @@ -787,7 +477,7 @@ vsp1_video_queue_setup(struct vb2_queue *vq, unsigned int sizes[], void *alloc_ctxs[]) { struct vsp1_video *video = vb2_get_drv_priv(vq); - const struct v4l2_pix_format_mplane *format = &video->format; + const struct v4l2_pix_format_mplane *format = &video->rwpf->format; unsigned int i; if (*nplanes) { @@ -816,18 +506,20 @@ static int vsp1_video_buffer_prepare(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue); - struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vbuf); - const struct v4l2_pix_format_mplane *format = &video->format; + struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf); + const struct v4l2_pix_format_mplane *format = &video->rwpf->format; unsigned int i; if (vb->num_planes < format->num_planes) return -EINVAL; + buf->mem.num_planes = vb->num_planes; + for (i = 0; i < vb->num_planes; ++i) { - buf->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i); - buf->length[i] = vb2_plane_size(vb, i); + buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i); + buf->mem.length[i] = vb2_plane_size(vb, i); - if (buf->length[i] < format->plane_fmt[i].sizeimage) + if (buf->mem.length[i] < format->plane_fmt[i].sizeimage) return -EINVAL; } @@ -839,7 +531,7 @@ static void vsp1_video_buffer_queue(struct vb2_buffer *vb) struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue); struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity); - struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vbuf); + struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf); unsigned long flags; bool empty; @@ -853,7 +545,7 @@ static void vsp1_video_buffer_queue(struct vb2_buffer *vb) spin_lock_irqsave(&pipe->irqlock, flags); - video->ops->queue(video, buf); + video->rwpf->ops->set_memory(video->rwpf, &buf->mem); pipe->buffers_ready |= 1 << video->pipe_index; if (vb2_is_streaming(&video->queue) && @@ -863,18 +555,6 @@ static void vsp1_video_buffer_queue(struct vb2_buffer *vb) spin_unlock_irqrestore(&pipe->irqlock, flags); } -static void vsp1_entity_route_setup(struct vsp1_entity *source) -{ - struct vsp1_entity *sink; - - if (source->route->reg == 0) - return; - - sink = container_of(source->sink, struct vsp1_entity, subdev.entity); - vsp1_write(source->vsp1, source->route->reg, - sink->route->inputs[source->sink_pad]); -} - static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count) { struct vsp1_video *video = vb2_get_drv_priv(vq); @@ -884,7 +564,7 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count) int ret; mutex_lock(&pipe->lock); - if (pipe->stream_count == pipe->num_video - 1) { + if (pipe->stream_count == pipe->num_inputs) { if (pipe->uds) { struct vsp1_uds *uds = to_uds(&pipe->uds->subdev); @@ -900,7 +580,7 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count) struct vsp1_rwpf *rpf = to_rwpf(&pipe->uds_input->subdev); - uds->scale_alpha = rpf->video.fmtinfo->alpha; + uds->scale_alpha = rpf->fmtinfo->alpha; } } @@ 
-931,7 +611,7 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq) { struct vsp1_video *video = vb2_get_drv_priv(vq); struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity); - struct vsp1_video_buffer *buffer; + struct vsp1_vb2_buffer *buffer; unsigned long flags; int ret; @@ -944,7 +624,7 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq) } mutex_unlock(&pipe->lock); - vsp1_pipeline_cleanup(pipe); + vsp1_video_pipeline_cleanup(pipe); media_entity_pipeline_stop(&video->video.entity); /* Remove all buffers from the IRQ queue. */ @@ -1004,7 +684,7 @@ vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format) return -EINVAL; mutex_lock(&video->lock); - format->fmt.pix_mp = video->format; + format->fmt.pix_mp = video->rwpf->format; mutex_unlock(&video->lock); return 0; @@ -1044,8 +724,8 @@ vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format) goto done; } - video->format = format->fmt.pix_mp; - video->fmtinfo = info; + video->rwpf->format = format->fmt.pix_mp; + video->rwpf->fmtinfo = info; done: mutex_unlock(&video->lock); @@ -1085,7 +765,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) if (ret < 0) goto err_stop; - ret = vsp1_pipeline_init(pipe, video); + ret = vsp1_video_pipeline_init(pipe, video); if (ret < 0) goto err_stop; @@ -1097,7 +777,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) return 0; err_cleanup: - vsp1_pipeline_cleanup(pipe); + vsp1_video_pipeline_cleanup(pipe); err_stop: media_entity_pipeline_stop(&video->video.entity); return ret; @@ -1183,62 +863,64 @@ static struct v4l2_file_operations vsp1_video_fops = { * Initialization and Cleanup */ -int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf) +struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1, + struct vsp1_rwpf *rwpf) { + struct vsp1_video *video; const char *direction; int ret; - switch (video->type) { - case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - direction = "output"; - video->pad.flags = MEDIA_PAD_FL_SINK; - break; + video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL); + if (!video) + return ERR_PTR(-ENOMEM); + + rwpf->video = video; + + video->vsp1 = vsp1; + video->rwpf = rwpf; - case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + if (rwpf->entity.type == VSP1_ENTITY_RPF) { direction = "input"; + video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; video->pad.flags = MEDIA_PAD_FL_SOURCE; video->video.vfl_dir = VFL_DIR_TX; - break; - - default: - return -EINVAL; + } else { + direction = "output"; + video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + video->pad.flags = MEDIA_PAD_FL_SINK; + video->video.vfl_dir = VFL_DIR_RX; } - video->rwpf = rwpf; - mutex_init(&video->lock); spin_lock_init(&video->irqlock); INIT_LIST_HEAD(&video->irqqueue); - mutex_init(&video->pipe.lock); - spin_lock_init(&video->pipe.irqlock); - INIT_LIST_HEAD(&video->pipe.entities); - init_waitqueue_head(&video->pipe.wq); - video->pipe.state = VSP1_PIPELINE_STOPPED; + vsp1_pipeline_init(&video->pipe); + video->pipe.frame_end = vsp1_video_pipeline_frame_end; /* Initialize the media entity... */ ret = media_entity_pads_init(&video->video.entity, 1, &video->pad); if (ret < 0) - return ret; + return ERR_PTR(ret); /* ... and the format ... 
*/ - video->fmtinfo = vsp1_get_format_info(VSP1_VIDEO_DEF_FORMAT); - video->format.pixelformat = video->fmtinfo->fourcc; - video->format.colorspace = V4L2_COLORSPACE_SRGB; - video->format.field = V4L2_FIELD_NONE; - video->format.width = VSP1_VIDEO_DEF_WIDTH; - video->format.height = VSP1_VIDEO_DEF_HEIGHT; - video->format.num_planes = 1; - video->format.plane_fmt[0].bytesperline = - video->format.width * video->fmtinfo->bpp[0] / 8; - video->format.plane_fmt[0].sizeimage = - video->format.plane_fmt[0].bytesperline * video->format.height; + rwpf->fmtinfo = vsp1_get_format_info(VSP1_VIDEO_DEF_FORMAT); + rwpf->format.pixelformat = rwpf->fmtinfo->fourcc; + rwpf->format.colorspace = V4L2_COLORSPACE_SRGB; + rwpf->format.field = V4L2_FIELD_NONE; + rwpf->format.width = VSP1_VIDEO_DEF_WIDTH; + rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT; + rwpf->format.num_planes = 1; + rwpf->format.plane_fmt[0].bytesperline = + rwpf->format.width * rwpf->fmtinfo->bpp[0] / 8; + rwpf->format.plane_fmt[0].sizeimage = + rwpf->format.plane_fmt[0].bytesperline * rwpf->format.height; /* ... and the video node... */ video->video.v4l2_dev = &video->vsp1->v4l2_dev; video->video.fops = &vsp1_video_fops; snprintf(video->video.name, sizeof(video->video.name), "%s %s", - rwpf->subdev.name, direction); + rwpf->entity.subdev.name, direction); video->video.vfl_type = VFL_TYPE_GRABBER; video->video.release = video_device_release_empty; video->video.ioctl_ops = &vsp1_video_ioctl_ops; @@ -1256,7 +938,7 @@ int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf) video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; video->queue.lock = &video->lock; video->queue.drv_priv = video; - video->queue.buf_struct_size = sizeof(struct vsp1_video_buffer); + video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer); video->queue.ops = &vsp1_video_queue_qops; video->queue.mem_ops = &vb2_dma_contig_memops; video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; @@ -1274,12 +956,12 @@ int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf) goto error; } - return 0; + return video; error: vb2_dma_contig_cleanup_ctx(video->alloc_ctx); vsp1_video_cleanup(video); - return ret; + return ERR_PTR(ret); } void vsp1_video_cleanup(struct vsp1_video *video) diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h index a929aa81cdbf..64abd39ee1e7 100644 --- a/drivers/media/platform/vsp1/vsp1_video.h +++ b/drivers/media/platform/vsp1/vsp1_video.h @@ -15,115 +15,34 @@ #include <linux/list.h> #include <linux/spinlock.h> -#include <linux/wait.h> -#include <media/media-entity.h> #include <media/videobuf2-v4l2.h> -struct vsp1_video; +#include "vsp1_pipe.h" +#include "vsp1_rwpf.h" -/* - * struct vsp1_format_info - VSP1 video format description - * @mbus: media bus format code - * @fourcc: V4L2 pixel format FCC identifier - * @planes: number of planes - * @bpp: bits per pixel - * @hwfmt: VSP1 hardware format - * @swap_yc: the Y and C components are swapped (Y comes before C) - * @swap_uv: the U and V components are swapped (V comes before U) - * @hsub: horizontal subsampling factor - * @vsub: vertical subsampling factor - * @alpha: has an alpha channel - */ -struct vsp1_format_info { - u32 fourcc; - unsigned int mbus; - unsigned int hwfmt; - unsigned int swap; - unsigned int planes; - unsigned int bpp[3]; - bool swap_yc; - bool swap_uv; - unsigned int hsub; - unsigned int vsub; - bool alpha; -}; - -enum vsp1_pipeline_state { - VSP1_PIPELINE_STOPPED, - VSP1_PIPELINE_RUNNING, - 
VSP1_PIPELINE_STOPPING, -}; - -/* - * struct vsp1_pipeline - A VSP1 hardware pipeline - * @media: the media pipeline - * @irqlock: protects the pipeline state - * @lock: protects the pipeline use count and stream count - */ -struct vsp1_pipeline { - struct media_pipeline pipe; - - spinlock_t irqlock; - enum vsp1_pipeline_state state; - wait_queue_head_t wq; - - struct mutex lock; - unsigned int use_count; - unsigned int stream_count; - unsigned int buffers_ready; - - unsigned int num_video; - unsigned int num_inputs; - struct vsp1_rwpf *inputs[VSP1_MAX_RPF]; - struct vsp1_rwpf *output; - struct vsp1_entity *bru; - struct vsp1_entity *lif; - struct vsp1_entity *uds; - struct vsp1_entity *uds_input; - - struct list_head entities; -}; - -static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e) -{ - if (likely(e->pipe)) - return container_of(e->pipe, struct vsp1_pipeline, pipe); - else - return NULL; -} - -struct vsp1_video_buffer { +struct vsp1_vb2_buffer { struct vb2_v4l2_buffer buf; struct list_head queue; - - dma_addr_t addr[3]; - unsigned int length[3]; + struct vsp1_rwpf_memory mem; }; -static inline struct vsp1_video_buffer * -to_vsp1_video_buffer(struct vb2_v4l2_buffer *vbuf) +static inline struct vsp1_vb2_buffer * +to_vsp1_vb2_buffer(struct vb2_v4l2_buffer *vbuf) { - return container_of(vbuf, struct vsp1_video_buffer, buf); + return container_of(vbuf, struct vsp1_vb2_buffer, buf); } -struct vsp1_video_operations { - void (*queue)(struct vsp1_video *video, struct vsp1_video_buffer *buf); -}; - struct vsp1_video { + struct list_head list; struct vsp1_device *vsp1; - struct vsp1_entity *rwpf; - - const struct vsp1_video_operations *ops; + struct vsp1_rwpf *rwpf; struct video_device video; enum v4l2_buf_type type; struct media_pad pad; struct mutex lock; - struct v4l2_pix_format_mplane format; - const struct vsp1_format_info *fmtinfo; struct vsp1_pipeline pipe; unsigned int pipe_index; @@ -140,16 +59,8 @@ static inline struct vsp1_video *to_vsp1_video(struct video_device *vdev) return container_of(vdev, struct vsp1_video, video); } -int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf); +struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1, + struct vsp1_rwpf *rwpf); void vsp1_video_cleanup(struct vsp1_video *video); -void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe); - -void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe, - struct vsp1_entity *input, - unsigned int alpha); - -void vsp1_pipelines_suspend(struct vsp1_device *vsp1); -void vsp1_pipelines_resume(struct vsp1_device *vsp1); - #endif /* __VSP1_VIDEO_H__ */ diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c index cbf514a6582d..c78d4af50fcf 100644 --- a/drivers/media/platform/vsp1/vsp1_wpf.c +++ b/drivers/media/platform/vsp1/vsp1_wpf.c @@ -34,8 +34,8 @@ static inline u32 vsp1_wpf_read(struct vsp1_rwpf *wpf, u32 reg) static inline void vsp1_wpf_write(struct vsp1_rwpf *wpf, u32 reg, u32 data) { - vsp1_write(wpf->entity.vsp1, - reg + wpf->entity.index * VI6_WPF_OFFSET, data); + vsp1_mod_write(&wpf->entity, + reg + wpf->entity.index * VI6_WPF_OFFSET, data); } /* ----------------------------------------------------------------------------- @@ -88,7 +88,8 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable) if (!enable) { vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0); - vsp1_wpf_write(wpf, VI6_WPF_SRCRPF, 0); + vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET + + VI6_WPF_SRCRPF, 0); return 0; } @@ -97,9 
+98,12 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable) * inputs as sub-layers and select the virtual RPF as the master * layer. */ - for (i = 0; i < pipe->num_inputs; ++i) { + for (i = 0; i < vsp1->info->rpf_count; ++i) { struct vsp1_rwpf *input = pipe->inputs[i]; + if (!input) + continue; + srcrpf |= (!pipe->bru && pipe->num_inputs == 1) ? VI6_WPF_SRCRPF_RPF_ACT_MST(input->entity.index) : VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index); @@ -112,7 +116,7 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable) /* Destination stride. */ if (!pipe->lif) { - struct v4l2_pix_format_mplane *format = &wpf->video.format; + struct v4l2_pix_format_mplane *format = &wpf->format; vsp1_wpf_write(wpf, VI6_WPF_DSTM_STRIDE_Y, format->plane_fmt[0].bytesperline); @@ -130,7 +134,7 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable) /* Format */ if (!pipe->lif) { - const struct vsp1_format_info *fmtinfo = wpf->video.fmtinfo; + const struct vsp1_format_info *fmtinfo = wpf->fmtinfo; outfmt = fmtinfo->hwfmt << VI6_WPF_OUTFMT_WRFMT_SHIFT; @@ -151,15 +155,17 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable) /* Take the control handler lock to ensure that the PDV value won't be * changed behind our back by a set control operation. */ - mutex_lock(wpf->ctrls.lock); - outfmt |= vsp1_wpf_read(wpf, VI6_WPF_OUTFMT) & VI6_WPF_OUTFMT_PDV_MASK; + if (vsp1->info->uapi) + mutex_lock(wpf->ctrls.lock); + outfmt |= wpf->alpha->cur.val << VI6_WPF_OUTFMT_PDV_SHIFT; vsp1_wpf_write(wpf, VI6_WPF_OUTFMT, outfmt); - mutex_unlock(wpf->ctrls.lock); + if (vsp1->info->uapi) + mutex_unlock(wpf->ctrls.lock); - vsp1_write(vsp1, VI6_DPR_WPF_FPORCH(wpf->entity.index), - VI6_DPR_WPF_FPORCH_FP_WPFN); + vsp1_mod_write(&wpf->entity, VI6_DPR_WPF_FPORCH(wpf->entity.index), + VI6_DPR_WPF_FPORCH_FP_WPFN); - vsp1_write(vsp1, VI6_WPF_WRBCK_CTRL, 0); + vsp1_mod_write(&wpf->entity, VI6_WPF_WRBCK_CTRL, 0); /* Enable interrupts */ vsp1_write(vsp1, VI6_WPF_IRQ_STA(wpf->entity.index), 0); @@ -195,20 +201,17 @@ static struct v4l2_subdev_ops wpf_ops = { * Video Device Operations */ -static void wpf_vdev_queue(struct vsp1_video *video, - struct vsp1_video_buffer *buf) +static void wpf_set_memory(struct vsp1_rwpf *wpf, struct vsp1_rwpf_memory *mem) { - struct vsp1_rwpf *wpf = container_of(video, struct vsp1_rwpf, video); - - vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_Y, buf->addr[0]); - if (buf->buf.vb2_buf.num_planes > 1) - vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C0, buf->addr[1]); - if (buf->buf.vb2_buf.num_planes > 2) - vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C1, buf->addr[2]); + vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_Y, mem->addr[0]); + if (mem->num_planes > 1) + vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C0, mem->addr[1]); + if (mem->num_planes > 2) + vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C1, mem->addr[2]); } -static const struct vsp1_video_operations wpf_vdev_ops = { - .queue = wpf_vdev_queue, +static const struct vsp1_rwpf_operations wpf_vdev_ops = { + .set_memory = wpf_set_memory, }; /* ----------------------------------------------------------------------------- @@ -218,7 +221,6 @@ static const struct vsp1_video_operations wpf_vdev_ops = { struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index) { struct v4l2_subdev *subdev; - struct vsp1_video *video; struct vsp1_rwpf *wpf; int ret; @@ -226,6 +228,8 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index) if (wpf == NULL) return ERR_PTR(-ENOMEM); + wpf->ops = &wpf_vdev_ops; + wpf->max_width = WPF_MAX_WIDTH; 
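With pipe->inputs[] now indexed by RPF index rather than packed, the wpf_s_stream() loop earlier in this hunk runs over vsp1->info->rpf_count and skips NULL slots while building the source-RPF selection. The sketch below shows that sparse-array walk with a plain bit-per-index mask; the real code uses the VI6_WPF_SRCRPF_RPF_ACT_MST/SUB encodings and chooses master versus sub-layer depending on whether a BRU is present.

#include <stdio.h>

#define MAX_RPF 5

struct rpf { unsigned int index; };

/* inputs[] is indexed by hardware RPF index, so unused slots are NULL and
 * every walk has to skip them - num_inputs only says how many are set. */
static unsigned int build_src_mask(struct rpf *inputs[MAX_RPF])
{
        unsigned int mask = 0;
        unsigned int i;

        for (i = 0; i < MAX_RPF; ++i) {
                if (!inputs[i])
                        continue;
                mask |= 1u << inputs[i]->index;
        }

        return mask;
}

int main(void)
{
        struct rpf rpf0 = { 0 }, rpf3 = { 3 };
        struct rpf *inputs[MAX_RPF] = { &rpf0, NULL, NULL, &rpf3, NULL };

        /* RPF0 and RPF3 connected: bits 0 and 3 -> 0x9. */
        printf("source mask: 0x%x\n", build_src_mask(inputs));
        return 0;
}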
wpf->max_height = WPF_MAX_HEIGHT; @@ -240,7 +244,7 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index) subdev = &wpf->entity.subdev; v4l2_subdev_init(subdev, &wpf_ops); - subdev->entity.ops = &vsp1_media_ops; + subdev->entity.ops = &vsp1->media_ops; subdev->internal_ops = &vsp1_subdev_internal_ops; snprintf(subdev->name, sizeof(subdev->name), "%s wpf.%u", dev_name(vsp1->dev), index); @@ -251,8 +255,9 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index) /* Initialize the control handler. */ v4l2_ctrl_handler_init(&wpf->ctrls, 1); - v4l2_ctrl_new_std(&wpf->ctrls, &wpf_ctrl_ops, V4L2_CID_ALPHA_COMPONENT, - 0, 255, 1, 255); + wpf->alpha = v4l2_ctrl_new_std(&wpf->ctrls, &wpf_ctrl_ops, + V4L2_CID_ALPHA_COMPONENT, + 0, 255, 1, 255); wpf->entity.subdev.ctrl_handler = &wpf->ctrls; @@ -263,48 +268,9 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index) goto error; } - /* Initialize the video device. */ - video = &wpf->video; - - video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; - video->vsp1 = vsp1; - video->ops = &wpf_vdev_ops; - - ret = vsp1_video_init(video, &wpf->entity); - if (ret < 0) - goto error; - - wpf->entity.video = video; - wpf->entity.sink = &wpf->video.video.entity; - return wpf; error: vsp1_entity_destroy(&wpf->entity); return ERR_PTR(ret); } - -/* - * vsp1_wpf_create_links() - RPF pads links creation - * @vsp1: Pointer to VSP1 device - * @entity: Pointer to VSP1 entity - * - * return negative error code or zero on success - */ -int vsp1_wpf_create_links(struct vsp1_device *vsp1, - struct vsp1_entity *entity) -{ - struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev); - unsigned int flags; - - /* Connect the video device to the WPF. All connections are immutable - * except for the WPF0 source link if a LIF is present. - */ - flags = MEDIA_LNK_FL_ENABLED; - if (!(vsp1->pdata.features & VSP1_HAS_LIF) || entity->index != 0) - flags |= MEDIA_LNK_FL_IMMUTABLE; - - return media_create_pad_link(&wpf->entity.subdev.entity, - RWPF_PAD_SOURCE, - &wpf->video.video.entity, 0, flags); -} diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index 859f0c08ee05..271f725b17e8 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c @@ -1530,11 +1530,11 @@ static int si476x_radio_probe(struct platform_device *pdev) if (si476x_core_has_diversity(radio->core)) { si476x_ctrls[SI476X_IDX_DIVERSITY_MODE].def = si476x_phase_diversity_mode_to_idx(radio->core->diversity_mode); - si476x_radio_add_new_custom(radio, SI476X_IDX_DIVERSITY_MODE); + rval = si476x_radio_add_new_custom(radio, SI476X_IDX_DIVERSITY_MODE); if (rval < 0) goto exit; - si476x_radio_add_new_custom(radio, SI476X_IDX_INTERCHIP_LINK); + rval = si476x_radio_add_new_custom(radio, SI476X_IDX_INTERCHIP_LINK); if (rval < 0) goto exit; } diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c index 3e08475af579..4dc2067bce14 100644 --- a/drivers/media/radio/tea575x.c +++ b/drivers/media/radio/tea575x.c @@ -14,10 +14,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
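The radio-si476x hunk above fixes two si476x_radio_add_new_custom() calls whose return value was computed but never assigned, so a failure could not reach the existing error path. The pattern being restored is the usual assign, check, goto-cleanup sequence; the sketch below uses hypothetical stand-in steps (add_ctrl_a/add_ctrl_b), not the driver's functions.

#include <stdio.h>

/* Hypothetical setup steps standing in for si476x_radio_add_new_custom();
 * each returns 0 on success or a negative error code. */
static int add_ctrl_a(void) { return 0; }
static int add_ctrl_b(void) { return -22; /* pretend -EINVAL */ }

static int probe(void)
{
        int rval;

        rval = add_ctrl_a();
        if (rval < 0)
                goto exit;

        /* The bug fixed in the patch: without assigning the return value
         * here, a failure would have been silently ignored. */
        rval = add_ctrl_b();
        if (rval < 0)
                goto exit;

        return 0;

exit:
        fprintf(stderr, "probe failed: %d\n", rval);
        return rval;
}

int main(void)
{
        return probe() ? 1 : 0;
}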
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * */ #include <linux/delay.h> @@ -226,6 +222,7 @@ void snd_tea575x_set_freq(struct snd_tea575x *tea) snd_tea575x_write(tea, tea->val); tea->freq = snd_tea575x_val_to_freq(tea, tea->val); } +EXPORT_SYMBOL(snd_tea575x_set_freq); /* * Linux Video interface @@ -582,25 +579,11 @@ int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner) return 0; } +EXPORT_SYMBOL(snd_tea575x_init); void snd_tea575x_exit(struct snd_tea575x *tea) { video_unregister_device(&tea->vd); v4l2_ctrl_handler_free(tea->vd.ctrl_handler); } - -static int __init alsa_tea575x_module_init(void) -{ - return 0; -} - -static void __exit alsa_tea575x_module_exit(void) -{ -} - -module_init(alsa_tea575x_module_init) -module_exit(alsa_tea575x_module_exit) - -EXPORT_SYMBOL(snd_tea575x_init); EXPORT_SYMBOL(snd_tea575x_exit); -EXPORT_SYMBOL(snd_tea575x_set_freq); diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c index ebc73b034249..3f9e6df7d837 100644 --- a/drivers/media/radio/wl128x/fmdrv_common.c +++ b/drivers/media/radio/wl128x/fmdrv_common.c @@ -68,7 +68,7 @@ MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan"); /* RDS buffer blocks */ static u32 default_rds_buf = 300; module_param(default_rds_buf, uint, 0444); -MODULE_PARM_DESC(rds_buf, "RDS buffer entries"); +MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries"); /* Radio Nr */ static u32 radio_nr = -1; diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index 18adf580f502..c96da3aaf00b 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c @@ -80,17 +80,24 @@ static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg) } /* enter extended function mode */ -static inline void nvt_efm_enable(struct nvt_dev *nvt) +static inline int nvt_efm_enable(struct nvt_dev *nvt) { + if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME)) + return -EBUSY; + /* Enabling Extended Function Mode explicitly requires writing 2x */ outb(EFER_EFM_ENABLE, nvt->cr_efir); outb(EFER_EFM_ENABLE, nvt->cr_efir); + + return 0; } /* exit extended function mode */ static inline void nvt_efm_disable(struct nvt_dev *nvt) { outb(EFER_EFM_DISABLE, nvt->cr_efir); + + release_region(nvt->cr_efir, 2); } /* @@ -100,8 +107,25 @@ static inline void nvt_efm_disable(struct nvt_dev *nvt) */ static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev) { - outb(CR_LOGICAL_DEV_SEL, nvt->cr_efir); - outb(ldev, nvt->cr_efdr); + nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL); +} + +/* select and enable logical device with setting EFM mode*/ +static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev) +{ + nvt_efm_enable(nvt); + nvt_select_logical_dev(nvt, ldev); + nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN); + nvt_efm_disable(nvt); +} + +/* select and disable logical device with setting EFM mode*/ +static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev) +{ + nvt_efm_enable(nvt); + nvt_select_logical_dev(nvt, ldev); + nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN); + nvt_efm_disable(nvt); } /* write val to cir config register */ @@ -137,6 +161,22 @@ static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset) return val; } +/* don't override io address if one is set already */ +static void nvt_set_ioaddr(struct 
nvt_dev *nvt, unsigned long *ioaddr)
+{
+ unsigned long old_addr;
+
+ old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8;
+ old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO);
+
+ if (old_addr)
+ *ioaddr = old_addr;
+ else {
+ nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI);
+ nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO);
+ }
+}
+
 /* dump current cir register contents */
 static void cir_dump_regs(struct nvt_dev *nvt)
 {
@@ -251,7 +291,7 @@ static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
 /* detect hardware features */
-static void nvt_hw_detect(struct nvt_dev *nvt)
+static int nvt_hw_detect(struct nvt_dev *nvt)
 {
 const char *chip_name;
 int chip_id;
@@ -266,10 +306,17 @@ static void nvt_hw_detect(struct nvt_dev *nvt)
 nvt_efm_enable(nvt);
 nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
 }
- nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);
+ nvt_efm_disable(nvt);
+ chip_id = nvt->chip_major << 8 | nvt->chip_minor;
+ if (chip_id == NVT_INVALID) {
+ dev_err(&nvt->pdev->dev,
+ "No device found on either EFM port\n");
+ return -ENODEV;
+ }
+
 chip_name = nvt_find_chip(nvt, chip_id);
 /* warn, but still let the driver load, if we don't know this chip */
@@ -282,7 +329,7 @@ static void nvt_hw_detect(struct nvt_dev *nvt)
 "found %s or compatible: chip id: 0x%02x 0x%02x",
 chip_name, nvt->chip_major, nvt->chip_minor);
- nvt_efm_disable(nvt);
+ return 0;
 }
 static void nvt_cir_ldev_init(struct nvt_dev *nvt)
@@ -305,12 +352,10 @@ static void nvt_cir_ldev_init(struct nvt_dev *nvt)
 val |= psval;
 nvt_cr_write(nvt, val, psreg);
- /* Select CIR logical device and enable */
+ /* Select CIR logical device */
 nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
- nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
- nvt_cr_write(nvt, nvt->cir_addr >> 8, CR_CIR_BASE_ADDR_HI);
- nvt_cr_write(nvt, nvt->cir_addr & 0xff, CR_CIR_BASE_ADDR_LO);
+ nvt_set_ioaddr(nvt, &nvt->cir_addr);
 nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);
@@ -320,7 +365,7 @@ static void nvt_cir_ldev_init(struct nvt_dev *nvt)
 static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
 {
- /* Select ACPI logical device, enable it and CIR Wake */
+ /* Select ACPI logical device and enable it */
 nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
 nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
@@ -330,12 +375,10 @@ static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
 /* enable pme interrupt of cir wakeup event */
 nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
- /* Select CIR Wake logical device and enable */
+ /* Select CIR Wake logical device */
 nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
- nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
- nvt_cr_write(nvt, nvt->cir_wake_addr >> 8, CR_CIR_BASE_ADDR_HI);
- nvt_cr_write(nvt, nvt->cir_wake_addr & 0xff, CR_CIR_BASE_ADDR_LO);
+ nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);
 nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC);
@@ -355,11 +398,19 @@ static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
 /* clear out the hardware's cir wake rx fifo */
 static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
 {
- u8 val;
+ u8 val, config;
+
+ config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
+
+ /* clearing wake fifo works in learning mode only */
+ nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0,
+ CIR_WAKE_IRCON);
 val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
 nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
 CIR_WAKE_FIFOCON);
+
+ nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
 }
 /* clear out the
hardware's cir tx fifo */ @@ -408,6 +459,9 @@ static void nvt_cir_regs_init(struct nvt_dev *nvt) /* and finally, enable interrupts */ nvt_set_cir_iren(nvt); + + /* enable the CIR logical device */ + nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR); } static void nvt_cir_wake_regs_init(struct nvt_dev *nvt) @@ -442,10 +496,15 @@ static void nvt_cir_wake_regs_init(struct nvt_dev *nvt) /* clear any and all stray interrupts */ nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS); + + /* enable the CIR WAKE logical device */ + nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE); } static void nvt_enable_wake(struct nvt_dev *nvt) { + unsigned long flags; + nvt_efm_enable(nvt); nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI); @@ -457,12 +516,16 @@ static void nvt_enable_wake(struct nvt_dev *nvt) nvt_efm_disable(nvt); + spin_lock_irqsave(&nvt->nvt_lock, flags); + nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN | CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV | CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL, CIR_WAKE_IRCON); nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS); nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN); + + spin_unlock_irqrestore(&nvt->nvt_lock, flags); } #if 0 /* Currently unused */ @@ -670,7 +733,6 @@ static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt) /* copy data from hardware rx fifo into driver buffer */ static void nvt_get_rx_ir_data(struct nvt_dev *nvt) { - unsigned long flags; u8 fifocount, val; unsigned int b_idx; bool overrun = false; @@ -689,8 +751,6 @@ static void nvt_get_rx_ir_data(struct nvt_dev *nvt) nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount); - spin_lock_irqsave(&nvt->nvt_lock, flags); - b_idx = nvt->pkts; /* This should never happen, but lets check anyway... */ @@ -712,8 +772,6 @@ static void nvt_get_rx_ir_data(struct nvt_dev *nvt) if (overrun) nvt_handle_rx_fifo_overrun(nvt); - - spin_unlock_irqrestore(&nvt->nvt_lock, flags); } static void nvt_cir_log_irqs(u8 status, u8 iren) @@ -736,16 +794,13 @@ static void nvt_cir_log_irqs(u8 status, u8 iren) static bool nvt_cir_tx_inactive(struct nvt_dev *nvt) { unsigned long flags; - bool tx_inactive; u8 tx_state; spin_lock_irqsave(&nvt->tx.lock, flags); tx_state = nvt->tx.tx_state; spin_unlock_irqrestore(&nvt->tx.lock, flags); - tx_inactive = (tx_state == ST_TX_NONE); - - return tx_inactive; + return tx_state == ST_TX_NONE; } /* interrupt service routine for incoming and outgoing CIR data */ @@ -757,9 +812,7 @@ static irqreturn_t nvt_cir_isr(int irq, void *data) nvt_dbg_verbose("%s firing", __func__); - nvt_efm_enable(nvt); - nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); - nvt_efm_disable(nvt); + spin_lock_irqsave(&nvt->nvt_lock, flags); /* * Get IR Status register contents. 
Write 1 to ack/clear @@ -775,9 +828,14 @@ static irqreturn_t nvt_cir_isr(int irq, void *data) * 0: CIR_IRSTS_GH - Min Length Detected */ status = nvt_cir_reg_read(nvt, CIR_IRSTS); - if (!status) { + iren = nvt_cir_reg_read(nvt, CIR_IREN); + + /* IRQ may be shared with CIR WAKE, therefore check for each + * status bit whether the related interrupt source is enabled + */ + if (!(status & iren)) { + spin_unlock_irqrestore(&nvt->nvt_lock, flags); nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__); - nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS); return IRQ_NONE; } @@ -785,13 +843,6 @@ static irqreturn_t nvt_cir_isr(int irq, void *data) nvt_cir_reg_write(nvt, status, CIR_IRSTS); nvt_cir_reg_write(nvt, 0, CIR_IRSTS); - /* Interrupt may be shared with CIR Wake, bail if CIR not enabled */ - iren = nvt_cir_reg_read(nvt, CIR_IREN); - if (!iren) { - nvt_dbg_verbose("%s exiting, CIR not enabled", __func__); - return IRQ_NONE; - } - nvt_cir_log_irqs(status, iren); if (status & CIR_IRSTS_RTR) { @@ -805,16 +856,14 @@ static irqreturn_t nvt_cir_isr(int irq, void *data) if (nvt_cir_tx_inactive(nvt)) nvt_get_rx_ir_data(nvt); - spin_lock_irqsave(&nvt->nvt_lock, flags); - cur_state = nvt->study_state; - spin_unlock_irqrestore(&nvt->nvt_lock, flags); - if (cur_state == ST_STUDY_NONE) nvt_clear_cir_fifo(nvt); } + spin_unlock_irqrestore(&nvt->nvt_lock, flags); + if (status & CIR_IRSTS_TE) nvt_clear_tx_fifo(nvt); @@ -863,9 +912,18 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data) nvt_dbg_wake("%s firing", __func__); + spin_lock_irqsave(&nvt->nvt_lock, flags); + status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS); - if (!status) + iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN); + + /* IRQ may be shared with CIR, therefore check for each + * status bit whether the related interrupt source is enabled + */ + if (!(status & iren)) { + spin_unlock_irqrestore(&nvt->nvt_lock, flags); return IRQ_NONE; + } if (status & CIR_WAKE_IRSTS_IR_PENDING) nvt_clear_cir_wake_fifo(nvt); @@ -873,13 +931,6 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data) nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS); nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS); - /* Interrupt may be shared with CIR, bail if Wake not enabled */ - iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN); - if (!iren) { - nvt_dbg_wake("%s exiting, wake not enabled", __func__); - return IRQ_HANDLED; - } - if ((status & CIR_WAKE_IRSTS_PE) && (nvt->wake_state == ST_WAKE_START)) { while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) { @@ -888,39 +939,21 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data) } nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN); - spin_lock_irqsave(&nvt->nvt_lock, flags); nvt->wake_state = ST_WAKE_FINISH; - spin_unlock_irqrestore(&nvt->nvt_lock, flags); } + spin_unlock_irqrestore(&nvt->nvt_lock, flags); + nvt_dbg_wake("%s done", __func__); return IRQ_HANDLED; } -static void nvt_enable_cir(struct nvt_dev *nvt) +static void nvt_disable_cir(struct nvt_dev *nvt) { - /* set function enable flags */ - nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN | - CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL, - CIR_IRCON); - - nvt_efm_enable(nvt); - - /* enable the CIR logical device */ - nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); - nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN); - - nvt_efm_disable(nvt); - - /* clear all pending interrupts */ - nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS); + unsigned long flags; - /* enable interrupts */ - nvt_set_cir_iren(nvt); -} + spin_lock_irqsave(&nvt->nvt_lock, flags); -static 
void nvt_disable_cir(struct nvt_dev *nvt) -{ /* disable CIR interrupts */ nvt_cir_reg_write(nvt, 0, CIR_IREN); @@ -934,13 +967,10 @@ static void nvt_disable_cir(struct nvt_dev *nvt) nvt_clear_cir_fifo(nvt); nvt_clear_tx_fifo(nvt); - nvt_efm_enable(nvt); + spin_unlock_irqrestore(&nvt->nvt_lock, flags); /* disable the CIR logical device */ - nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); - nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN); - - nvt_efm_disable(nvt); + nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR); } static int nvt_open(struct rc_dev *dev) @@ -949,20 +979,31 @@ static int nvt_open(struct rc_dev *dev) unsigned long flags; spin_lock_irqsave(&nvt->nvt_lock, flags); - nvt_enable_cir(nvt); + + /* set function enable flags */ + nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN | + CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL, + CIR_IRCON); + + /* clear all pending interrupts */ + nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS); + + /* enable interrupts */ + nvt_set_cir_iren(nvt); + spin_unlock_irqrestore(&nvt->nvt_lock, flags); + /* enable the CIR logical device */ + nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR); + return 0; } static void nvt_close(struct rc_dev *dev) { struct nvt_dev *nvt = dev->priv; - unsigned long flags; - spin_lock_irqsave(&nvt->nvt_lock, flags); nvt_disable_cir(nvt); - spin_unlock_irqrestore(&nvt->nvt_lock, flags); } /* Allocate memory, probe hardware, and initialize everything */ @@ -1024,7 +1065,9 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) init_waitqueue_head(&nvt->tx.queue); - nvt_hw_detect(nvt); + ret = nvt_hw_detect(nvt); + if (ret) + goto exit_free_dev_rdev; /* Initialize CIR & CIR Wake Logical Devices */ nvt_efm_enable(nvt); @@ -1032,7 +1075,10 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) nvt_cir_wake_ldev_init(nvt); nvt_efm_disable(nvt); - /* Initialize CIR & CIR Wake Config Registers */ + /* + * Initialize CIR & CIR Wake Config Registers + * and enable logical devices + */ nvt_cir_regs_init(nvt); nvt_cir_wake_regs_init(nvt); @@ -1079,12 +1125,12 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) goto exit_unregister_device; if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr, - CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) + CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake")) goto exit_unregister_device; if (devm_request_irq(&pdev->dev, nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED, - NVT_DRIVER_NAME, (void *)nvt)) + NVT_DRIVER_NAME "-wake", (void *)nvt)) goto exit_unregister_device; device_init_wakeup(&pdev->dev, true); @@ -1109,15 +1155,11 @@ exit_free_dev_rdev: static void nvt_remove(struct pnp_dev *pdev) { struct nvt_dev *nvt = pnp_get_drvdata(pdev); - unsigned long flags; - spin_lock_irqsave(&nvt->nvt_lock, flags); - /* disable CIR */ - nvt_cir_reg_write(nvt, 0, CIR_IREN); nvt_disable_cir(nvt); + /* enable CIR Wake (for IR power-on) */ nvt_enable_wake(nvt); - spin_unlock_irqrestore(&nvt->nvt_lock, flags); rc_unregister_device(nvt->rdev); } @@ -1129,26 +1171,23 @@ static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state) nvt_dbg("%s called", __func__); - /* zero out misc state tracking */ - spin_lock_irqsave(&nvt->nvt_lock, flags); - nvt->study_state = ST_STUDY_NONE; - nvt->wake_state = ST_WAKE_NONE; - spin_unlock_irqrestore(&nvt->nvt_lock, flags); - spin_lock_irqsave(&nvt->tx.lock, flags); nvt->tx.tx_state = ST_TX_NONE; spin_unlock_irqrestore(&nvt->tx.lock, flags); + spin_lock_irqsave(&nvt->nvt_lock, flags); + + /* zero out misc state 
tracking */ + nvt->study_state = ST_STUDY_NONE; + nvt->wake_state = ST_WAKE_NONE; + /* disable all CIR interrupts */ nvt_cir_reg_write(nvt, 0, CIR_IREN); - nvt_efm_enable(nvt); + spin_unlock_irqrestore(&nvt->nvt_lock, flags); /* disable cir logical dev */ - nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); - nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN); - - nvt_efm_disable(nvt); + nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR); /* make sure wake is enabled */ nvt_enable_wake(nvt); @@ -1162,16 +1201,6 @@ static int nvt_resume(struct pnp_dev *pdev) nvt_dbg("%s called", __func__); - /* open interrupt */ - nvt_set_cir_iren(nvt); - - /* Enable CIR logical device */ - nvt_efm_enable(nvt); - nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); - nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN); - - nvt_efm_disable(nvt); - nvt_cir_regs_init(nvt); nvt_cir_wake_regs_init(nvt); @@ -1181,6 +1210,7 @@ static int nvt_resume(struct pnp_dev *pdev) static void nvt_shutdown(struct pnp_dev *pdev) { struct nvt_dev *nvt = pnp_get_drvdata(pdev); + nvt_enable_wake(nvt); } diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h index 0ad15d34e9c9..4a5650dffa29 100644 --- a/drivers/media/rc/nuvoton-cir.h +++ b/drivers/media/rc/nuvoton-cir.h @@ -68,7 +68,8 @@ enum nvt_chip_ver { NVT_W83667HG = 0xa510, NVT_6775F = 0xb470, NVT_6776F = 0xc330, - NVT_6779D = 0xc560 + NVT_6779D = 0xc560, + NVT_INVALID = 0xffff, }; struct nvt_chip { @@ -157,8 +158,8 @@ struct nvt_dev { /* total length of CIR and CIR WAKE */ #define CIR_IOREG_LENGTH 0x0f -/* RX limit length, 8 high bits for SLCH, 8 low bits for SLCL (0x7d0 = 2000) */ -#define CIR_RX_LIMIT_COUNT 0x7d0 +/* RX limit length, 8 high bits for SLCH, 8 low bits for SLCL */ +#define CIR_RX_LIMIT_COUNT (IR_DEFAULT_TIMEOUT / US_TO_NS(SAMPLE_PERIOD)) /* CIR Regs */ #define CIR_IRCON 0x00 @@ -292,10 +293,7 @@ struct nvt_dev { #define CIR_WAKE_IREN_RTR 0x40 #define CIR_WAKE_IREN_PE 0x20 #define CIR_WAKE_IREN_RFO 0x10 -#define CIR_WAKE_IREN_TE 0x08 -#define CIR_WAKE_IREN_TTR 0x04 -#define CIR_WAKE_IREN_TFU 0x02 -#define CIR_WAKE_IREN_GH 0x01 +#define CIR_WAKE_IREN_GH 0x08 /* CIR WAKE FIFOCON settings */ #define CIR_WAKE_FIFOCON_RXFIFOCLR 0x08 diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h index 7359f3d03b64..585d5e52118d 100644 --- a/drivers/media/rc/rc-core-priv.h +++ b/drivers/media/rc/rc-core-priv.h @@ -16,6 +16,9 @@ #ifndef _RC_CORE_PRIV #define _RC_CORE_PRIV +/* Define the max number of pulse/space transitions to buffer */ +#define MAX_IR_EVENT_SIZE 512 + #include <linux/slab.h> #include <linux/spinlock.h> #include <media/rc-core.h> @@ -35,7 +38,8 @@ struct ir_raw_event_ctrl { struct list_head list; /* to keep track of raw clients */ struct task_struct *thread; spinlock_t lock; - struct kfifo_rec_ptr_1 kfifo; /* fifo for the pulse/space durations */ + /* fifo for the pulse/space durations */ + DECLARE_KFIFO(kfifo, struct ir_raw_event, MAX_IR_EVENT_SIZE); ktime_t last_event; /* when last event occurred */ enum raw_event_type last_type; /* last event type */ struct rc_dev *dev; /* pointer to the parent rc_dev */ diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c index c69807fe2fef..144304c94606 100644 --- a/drivers/media/rc/rc-ir-raw.c +++ b/drivers/media/rc/rc-ir-raw.c @@ -20,9 +20,6 @@ #include <linux/freezer.h> #include "rc-core-priv.h" -/* Define the max number of pulse/space transitions to buffer */ -#define MAX_IR_EVENT_SIZE 512 - /* Used to keep track of IR raw clients, protected by 
ir_raw_handler_lock */ static LIST_HEAD(ir_raw_client_list); @@ -36,14 +33,12 @@ static int ir_raw_event_thread(void *data) struct ir_raw_event ev; struct ir_raw_handler *handler; struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data; - int retval; while (!kthread_should_stop()) { spin_lock_irq(&raw->lock); - retval = kfifo_len(&raw->kfifo); - if (retval < sizeof(ev)) { + if (!kfifo_len(&raw->kfifo)) { set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) @@ -54,7 +49,8 @@ static int ir_raw_event_thread(void *data) continue; } - retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev)); + if(!kfifo_out(&raw->kfifo, &ev, 1)) + dev_err(&raw->dev->dev, "IR event FIFO is empty!\n"); spin_unlock_irq(&raw->lock); mutex_lock(&ir_raw_handler_lock); @@ -87,8 +83,10 @@ int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev) IR_dprintk(2, "sample: (%05dus %s)\n", TO_US(ev->duration), TO_STR(ev->pulse)); - if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev)) - return -ENOMEM; + if (!kfifo_put(&dev->raw->kfifo, *ev)) { + dev_err(&dev->dev, "IR event FIFO is full!\n"); + return -ENOSPC; + } return 0; } @@ -273,11 +271,7 @@ int ir_raw_event_register(struct rc_dev *dev) dev->raw->dev = dev; dev->change_protocol = change_protocol; - rc = kfifo_alloc(&dev->raw->kfifo, - sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE, - GFP_KERNEL); - if (rc < 0) - goto out; + INIT_KFIFO(dev->raw->kfifo); spin_lock_init(&dev->raw->lock); dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw, @@ -319,7 +313,6 @@ void ir_raw_event_unregister(struct rc_dev *dev) handler->raw_unregister(dev); mutex_unlock(&ir_raw_handler_lock); - kfifo_free(&dev->raw->kfifo); kfree(dev->raw); dev->raw = NULL; } diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 1042fa331a07..dcf20d9cbe09 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -723,12 +723,18 @@ int rc_open(struct rc_dev *rdev) return -EINVAL; mutex_lock(&rdev->lock); + if (!rdev->initialized) { + rval = -EINVAL; + goto unlock; + } + if (!rdev->users++ && rdev->open != NULL) rval = rdev->open(rdev); if (rval) rdev->users--; +unlock: mutex_unlock(&rdev->lock); return rval; @@ -874,6 +880,10 @@ static ssize_t show_protocols(struct device *device, return -EINVAL; mutex_lock(&dev->lock); + if (!dev->initialized) { + mutex_unlock(&dev->lock); + return -EINVAL; + } if (fattr->type == RC_FILTER_NORMAL) { enabled = dev->enabled_protocols; @@ -1074,6 +1084,10 @@ static ssize_t store_protocols(struct device *device, } mutex_lock(&dev->lock); + if (!dev->initialized) { + rc = -EINVAL; + goto out; + } old_protocols = *current_protocols; new_protocols = old_protocols; @@ -1154,12 +1168,17 @@ static ssize_t show_filter(struct device *device, if (!dev) return -EINVAL; + mutex_lock(&dev->lock); + if (!dev->initialized) { + mutex_unlock(&dev->lock); + return -EINVAL; + } + if (fattr->type == RC_FILTER_NORMAL) filter = &dev->scancode_filter; else filter = &dev->scancode_wakeup_filter; - mutex_lock(&dev->lock); if (fattr->mask) val = filter->mask; else @@ -1222,6 +1241,10 @@ static ssize_t store_filter(struct device *device, return -EINVAL; mutex_lock(&dev->lock); + if (!dev->initialized) { + ret = -EINVAL; + goto unlock; + } new_filter = *filter; if (fattr->mask) @@ -1419,14 +1442,6 @@ int rc_register_device(struct rc_dev *dev) dev->sysfs_groups[attr++] = &rc_dev_wakeup_protocol_attr_grp; dev->sysfs_groups[attr++] = NULL; - /* - * Take the lock here, as the device sysfs node will appear - * when device_add() 
is called, which may trigger an ir-keytable udev - * rule, which will in turn call show_protocols and access - * dev->enabled_protocols before it has been initialized. - */ - mutex_lock(&dev->lock); - rc = device_add(&dev->dev); if (rc) goto out_unlock; @@ -1440,13 +1455,7 @@ int rc_register_device(struct rc_dev *dev) dev->input_dev->phys = dev->input_phys; dev->input_dev->name = dev->input_name; - /* input_register_device can call ir_open, so unlock mutex here */ - mutex_unlock(&dev->lock); - rc = input_register_device(dev->input_dev); - - mutex_lock(&dev->lock); - if (rc) goto out_table; @@ -1475,10 +1484,7 @@ int rc_register_device(struct rc_dev *dev) request_module_nowait("ir-lirc-codec"); raw_init = true; } - /* calls ir_register_device so unlock mutex here*/ - mutex_unlock(&dev->lock); rc = ir_raw_event_register(dev); - mutex_lock(&dev->lock); if (rc < 0) goto out_input; } @@ -1491,6 +1497,8 @@ int rc_register_device(struct rc_dev *dev) dev->enabled_protocols = rc_type; } + mutex_lock(&dev->lock); + dev->initialized = true; mutex_unlock(&dev->lock); IR_dprintk(1, "Registered rc%u (driver: %s, remote: %s, mode %s)\n", @@ -1512,7 +1520,6 @@ out_table: out_dev: device_del(&dev->dev); out_unlock: - mutex_unlock(&dev->lock); ida_simple_remove(&rc_ida, minor); return rc; } diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c index 504bfbc4027a..9f3e0fd4cad9 100644 --- a/drivers/media/tuners/m88rs6000t.c +++ b/drivers/media/tuners/m88rs6000t.c @@ -461,13 +461,12 @@ static int m88rs6000t_sleep(struct dvb_frontend *fe) dev_dbg(&dev->client->dev, "%s:\n", __func__); ret = regmap_write(dev->regmap, 0x07, 0x6d); - if (ret) - goto err; - usleep_range(5000, 10000); -err: - if (ret) + if (ret) { dev_dbg(&dev->client->dev, "failed=%d\n", ret); - return ret; + return ret; + } + usleep_range(5000, 10000); + return 0; } static int m88rs6000t_get_frequency(struct dvb_frontend *fe, u32 *frequency) diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c index a7a8452e99d2..6ab35e315fe7 100644 --- a/drivers/media/tuners/r820t.c +++ b/drivers/media/tuners/r820t.c @@ -1295,7 +1295,7 @@ static int generic_set_freq(struct dvb_frontend *fe, v4l2_std_id std, u32 delsys) { struct r820t_priv *priv = fe->tuner_priv; - int rc = -EINVAL; + int rc; u32 lo_freq; tuner_dbg("should set frequency to %d kHz, bw %d MHz\n", diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c index 0e1ca2b00e61..243ac3816028 100644 --- a/drivers/media/tuners/si2157.c +++ b/drivers/media/tuners/si2157.c @@ -364,8 +364,8 @@ static int si2157_get_if_frequency(struct dvb_frontend *fe, u32 *frequency) static const struct dvb_tuner_ops si2157_ops = { .info = { .name = "Silicon Labs Si2146/2147/2148/2157/2158", - .frequency_min = 55000000, - .frequency_max = 862000000, + .frequency_min = 42000000, + .frequency_max = 870000000, }, .init = si2157_init, @@ -403,7 +403,7 @@ err: } static int si2157_probe(struct i2c_client *client, - const struct i2c_device_id *id) + const struct i2c_device_id *id) { struct si2157_config *cfg = client->dev.platform_data; struct dvb_frontend *fe = cfg->fe; @@ -438,6 +438,31 @@ static int si2157_probe(struct i2c_client *client, memcpy(&fe->ops.tuner_ops, &si2157_ops, sizeof(struct dvb_tuner_ops)); fe->tuner_priv = client; +#ifdef CONFIG_MEDIA_CONTROLLER + if (cfg->mdev) { + dev->mdev = cfg->mdev; + + dev->ent.name = KBUILD_MODNAME; + dev->ent.function = MEDIA_ENT_F_TUNER; + + dev->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK; + 
dev->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE; + dev->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE; + + ret = media_entity_pads_init(&dev->ent, TUNER_NUM_PADS, + &dev->pad[0]); + + if (ret) + goto err_kfree; + + ret = media_device_register_entity(cfg->mdev, &dev->ent); + if (ret) { + media_entity_cleanup(&dev->ent); + goto err_kfree; + } + } +#endif + dev_info(&client->dev, "Silicon Labs %s successfully attached\n", dev->chiptype == SI2157_CHIPTYPE_SI2146 ? "Si2146" : "Si2147/2148/2157/2158"); @@ -458,6 +483,14 @@ static int si2157_remove(struct i2c_client *client) dev_dbg(&client->dev, "\n"); + /* stop statistics polling */ + cancel_delayed_work_sync(&dev->stat_work); + +#ifdef CONFIG_MEDIA_CONTROLLER_DVB + if (dev->mdev) + media_device_unregister_entity(&dev->ent); +#endif + memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops)); fe->tuner_priv = NULL; kfree(dev); diff --git a/drivers/media/tuners/si2157.h b/drivers/media/tuners/si2157.h index 4db97ab744d6..5f1a60bf7ced 100644 --- a/drivers/media/tuners/si2157.h +++ b/drivers/media/tuners/si2157.h @@ -18,6 +18,7 @@ #define SI2157_H #include <linux/kconfig.h> +#include <media/media-device.h> #include "dvb_frontend.h" /* @@ -30,6 +31,10 @@ struct si2157_config { */ struct dvb_frontend *fe; +#if defined(CONFIG_MEDIA_CONTROLLER) + struct media_device *mdev; +#endif + /* * Spectral Inversion */ diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h index ecc463db8f69..589d558d381c 100644 --- a/drivers/media/tuners/si2157_priv.h +++ b/drivers/media/tuners/si2157_priv.h @@ -18,6 +18,7 @@ #define SI2157_PRIV_H #include <linux/firmware.h> +#include <media/v4l2-mc.h> #include "si2157.h" /* state struct */ @@ -31,6 +32,13 @@ struct si2157_dev { u8 if_port; u32 if_frequency; struct delayed_work stat_work; + +#if defined(CONFIG_MEDIA_CONTROLLER) + struct media_device *mdev; + struct media_entity ent; + struct media_pad pad[TUNER_NUM_PADS]; +#endif + }; #define SI2157_CHIPTYPE_SI2157 0 diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c index 4e941f00b600..317ef63ee789 100644 --- a/drivers/media/tuners/tuner-xc2028.c +++ b/drivers/media/tuners/tuner-xc2028.c @@ -1403,11 +1403,14 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) * in order to avoid troubles during device release. 
*/ kfree(priv->ctrl.fname); + priv->ctrl.fname = NULL; memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); if (p->fname) { priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL); - if (priv->ctrl.fname == NULL) + if (priv->ctrl.fname == NULL) { rc = -ENOMEM; + goto unlock; + } } /* @@ -1439,6 +1442,7 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) } else priv->state = XC2028_WAITING_FIRMWARE; } +unlock: mutex_unlock(&priv->lock); return rc; diff --git a/drivers/media/usb/as102/as102_drv.h b/drivers/media/usb/as102/as102_drv.h index aee2d76e8dfc..8def19d9ab92 100644 --- a/drivers/media/usb/as102/as102_drv.h +++ b/drivers/media/usb/as102/as102_drv.h @@ -52,7 +52,7 @@ struct as10x_bus_adapter_t { struct as10x_cmd_t *cmd, *rsp; /* bus adapter private ops callback */ - struct as102_priv_ops_t *ops; + const struct as102_priv_ops_t *ops; }; struct as102_dev_t { diff --git a/drivers/media/usb/as102/as102_usb_drv.c b/drivers/media/usb/as102/as102_usb_drv.c index 3f669066ccf6..0e8030c071b8 100644 --- a/drivers/media/usb/as102/as102_usb_drv.c +++ b/drivers/media/usb/as102/as102_usb_drv.c @@ -189,7 +189,7 @@ static int as102_read_ep2(struct as10x_bus_adapter_t *bus_adap, return actual_len; } -static struct as102_priv_ops_t as102_priv_ops = { +static const struct as102_priv_ops_t as102_priv_ops = { .upload_fw_pkt = as102_send_ep1, .xfer_cmd = as102_usb_xfer_cmd, .as102_read_ep2 = as102_read_ep2, diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c index 9e29e70a78d7..7cafe4dd5fd1 100644 --- a/drivers/media/usb/au0828/au0828-core.c +++ b/drivers/media/usb/au0828/au0828-core.c @@ -143,7 +143,7 @@ static void au0828_unregister_media_device(struct au0828_dev *dev) #endif } -static void au0828_usb_release(struct au0828_dev *dev) +void au0828_usb_release(struct au0828_dev *dev) { au0828_unregister_media_device(dev); @@ -153,33 +153,6 @@ static void au0828_usb_release(struct au0828_dev *dev) kfree(dev); } -#ifdef CONFIG_VIDEO_AU0828_V4L2 - -static void au0828_usb_v4l2_media_release(struct au0828_dev *dev) -{ -#ifdef CONFIG_MEDIA_CONTROLLER - int i; - - for (i = 0; i < AU0828_MAX_INPUT; i++) { - if (AUVI_INPUT(i).type == AU0828_VMUX_UNDEFINED) - return; - media_device_unregister_entity(&dev->input_ent[i]); - } -#endif -} - -static void au0828_usb_v4l2_release(struct v4l2_device *v4l2_dev) -{ - struct au0828_dev *dev = - container_of(v4l2_dev, struct au0828_dev, v4l2_dev); - - v4l2_ctrl_handler_free(&dev->v4l2_ctrl_hdl); - v4l2_device_unregister(&dev->v4l2_dev); - au0828_usb_v4l2_media_release(dev); - au0828_usb_release(dev); -} -#endif - static void au0828_usb_disconnect(struct usb_interface *interface) { struct au0828_dev *dev = usb_get_intfdata(interface); @@ -202,18 +175,13 @@ static void au0828_usb_disconnect(struct usb_interface *interface) mutex_lock(&dev->mutex); dev->usbdev = NULL; mutex_unlock(&dev->mutex); -#ifdef CONFIG_VIDEO_AU0828_V4L2 - if (AUVI_INPUT(0).type != AU0828_VMUX_UNDEFINED) { - au0828_analog_unregister(dev); - v4l2_device_disconnect(&dev->v4l2_dev); - v4l2_device_put(&dev->v4l2_dev); + if (au0828_analog_unregister(dev)) { /* * No need to call au0828_usb_release() if V4L2 is enabled, * as this is already called via au0828_usb_v4l2_release() */ return; } -#endif au0828_usb_release(dev); } @@ -223,23 +191,12 @@ static int au0828_media_device_init(struct au0828_dev *dev, #ifdef CONFIG_MEDIA_CONTROLLER struct media_device *mdev; - mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); - if (!mdev) - return -ENOMEM; - - mdev->dev = &udev->dev; - if 
(!dev->board.name) - strlcpy(mdev->model, "unknown au0828", sizeof(mdev->model)); + mdev = v4l2_mc_usb_media_device_init(udev, "unknown au0828"); else - strlcpy(mdev->model, dev->board.name, sizeof(mdev->model)); - if (udev->serial) - strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial)); - strcpy(mdev->bus_info, udev->devpath); - mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); - mdev->driver_version = LINUX_VERSION_CODE; - - media_device_init(mdev); + mdev = v4l2_mc_usb_media_device_init(udev, dev->board.name); + if (!mdev) + return -ENOMEM; dev->media_dev = mdev; #endif @@ -247,83 +204,6 @@ static int au0828_media_device_init(struct au0828_dev *dev, } -static int au0828_create_media_graph(struct au0828_dev *dev) -{ -#ifdef CONFIG_MEDIA_CONTROLLER - struct media_device *mdev = dev->media_dev; - struct media_entity *entity; - struct media_entity *tuner = NULL, *decoder = NULL; - int i, ret; - - if (!mdev) - return 0; - - media_device_for_each_entity(entity, mdev) { - switch (entity->function) { - case MEDIA_ENT_F_TUNER: - tuner = entity; - break; - case MEDIA_ENT_F_ATV_DECODER: - decoder = entity; - break; - } - } - - /* Analog setup, using tuner as a link */ - - /* Something bad happened! */ - if (!decoder) - return -EINVAL; - - if (tuner) { - ret = media_create_pad_link(tuner, TUNER_PAD_IF_OUTPUT, - decoder, 0, - MEDIA_LNK_FL_ENABLED); - if (ret) - return ret; - } - ret = media_create_pad_link(decoder, 1, &dev->vdev.entity, 0, - MEDIA_LNK_FL_ENABLED); - if (ret) - return ret; - ret = media_create_pad_link(decoder, 2, &dev->vbi_dev.entity, 0, - MEDIA_LNK_FL_ENABLED); - if (ret) - return ret; - - for (i = 0; i < AU0828_MAX_INPUT; i++) { - struct media_entity *ent = &dev->input_ent[i]; - - if (AUVI_INPUT(i).type == AU0828_VMUX_UNDEFINED) - break; - - switch (AUVI_INPUT(i).type) { - case AU0828_VMUX_CABLE: - case AU0828_VMUX_TELEVISION: - case AU0828_VMUX_DVB: - if (!tuner) - break; - - ret = media_create_pad_link(ent, 0, tuner, - TUNER_PAD_RF_INPUT, - MEDIA_LNK_FL_ENABLED); - if (ret) - return ret; - break; - case AU0828_VMUX_COMPOSITE: - case AU0828_VMUX_SVIDEO: - default: /* AU0828_VMUX_DEBUG */ - /* FIXME: fix the decoder PAD */ - ret = media_create_pad_link(ent, 0, decoder, 0, 0); - if (ret) - return ret; - break; - } - } -#endif - return 0; -} - static int au0828_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { @@ -378,32 +258,13 @@ static int au0828_usb_probe(struct usb_interface *interface, return retval; } -#ifdef CONFIG_VIDEO_AU0828_V4L2 - dev->v4l2_dev.release = au0828_usb_v4l2_release; - - /* Create the v4l2_device */ -#ifdef CONFIG_MEDIA_CONTROLLER - dev->v4l2_dev.mdev = dev->media_dev; -#endif - retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev); + retval = au0828_v4l2_device_register(interface, dev); if (retval) { - pr_err("%s() v4l2_device_register failed\n", - __func__); + au0828_usb_v4l2_media_release(dev); mutex_unlock(&dev->lock); kfree(dev); return retval; } - /* This control handler will inherit the controls from au8522 */ - retval = v4l2_ctrl_handler_init(&dev->v4l2_ctrl_hdl, 4); - if (retval) { - pr_err("%s() v4l2_ctrl_handler_init failed\n", - __func__); - mutex_unlock(&dev->lock); - kfree(dev); - return retval; - } - dev->v4l2_dev.ctrl_handler = &dev->v4l2_ctrl_hdl; -#endif /* Power Up the bridge */ au0828_write(dev, REG_600, 1 << 4); @@ -417,11 +278,13 @@ static int au0828_usb_probe(struct usb_interface *interface, /* Setup */ au0828_card_setup(dev); -#ifdef CONFIG_VIDEO_AU0828_V4L2 /* Analog TV */ - if 
(AUVI_INPUT(0).type != AU0828_VMUX_UNDEFINED) - au0828_analog_register(dev, interface); -#endif + retval = au0828_analog_register(dev, interface); + if (retval) { + pr_err("%s() au0282_dev_register failed to register on V4L2\n", + __func__); + goto done; + } /* Digital TV */ retval = au0828_dvb_register(dev); @@ -443,13 +306,6 @@ static int au0828_usb_probe(struct usb_interface *interface, mutex_unlock(&dev->lock); - retval = au0828_create_media_graph(dev); - if (retval) { - pr_err("%s() au0282_dev_register failed to create graph\n", - __func__); - goto done; - } - #ifdef CONFIG_MEDIA_CONTROLLER retval = media_device_register(dev->media_dev); #endif diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c index 94363a3ba400..0e174e860614 100644 --- a/drivers/media/usb/au0828/au0828-dvb.c +++ b/drivers/media/usb/au0828/au0828-dvb.c @@ -181,7 +181,7 @@ static int stop_urb_transfer(struct au0828_dev *dev) static int start_urb_transfer(struct au0828_dev *dev) { struct urb *purb; - int i, ret = -ENOMEM; + int i, ret; dprintk(2, "%s()\n", __func__); @@ -194,7 +194,7 @@ static int start_urb_transfer(struct au0828_dev *dev) dev->urbs[i] = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urbs[i]) - goto err; + return -ENOMEM; purb = dev->urbs[i]; @@ -207,9 +207,10 @@ static int start_urb_transfer(struct au0828_dev *dev) if (!purb->transfer_buffer) { usb_free_urb(purb); dev->urbs[i] = NULL; + ret = -ENOMEM; pr_err("%s: failed big buffer allocation, err = %d\n", __func__, ret); - goto err; + return ret; } purb->status = -EINPROGRESS; @@ -235,10 +236,7 @@ static int start_urb_transfer(struct au0828_dev *dev) } dev->urb_streaming = true; - ret = 0; - -err: - return ret; + return 0; } static void au0828_start_transport(struct au0828_dev *dev) diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c index 8c54fd21022e..2fc2b29d2dd9 100644 --- a/drivers/media/usb/au0828/au0828-video.c +++ b/drivers/media/usb/au0828/au0828-video.c @@ -638,6 +638,142 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb) return rc; } +void au0828_usb_v4l2_media_release(struct au0828_dev *dev) +{ +#ifdef CONFIG_MEDIA_CONTROLLER + int i; + + for (i = 0; i < AU0828_MAX_INPUT; i++) { + if (AUVI_INPUT(i).type == AU0828_VMUX_UNDEFINED) + return; + media_device_unregister_entity(&dev->input_ent[i]); + } +#endif +} + +static int au0828_create_media_graph(struct au0828_dev *dev) +{ +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_device *mdev = dev->media_dev; + struct media_entity *entity; + struct media_entity *tuner = NULL, *decoder = NULL; + int i, ret; + + if (!mdev) + return 0; + + media_device_for_each_entity(entity, mdev) { + switch (entity->function) { + case MEDIA_ENT_F_TUNER: + tuner = entity; + break; + case MEDIA_ENT_F_ATV_DECODER: + decoder = entity; + break; + } + } + + /* Analog setup, using tuner as a link */ + + /* Something bad happened! 
*/ + if (!decoder) + return -EINVAL; + + if (tuner) { + ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT, + decoder, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + } + ret = media_create_pad_link(decoder, 1, &dev->vdev.entity, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + ret = media_create_pad_link(decoder, 2, &dev->vbi_dev.entity, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + + for (i = 0; i < AU0828_MAX_INPUT; i++) { + struct media_entity *ent = &dev->input_ent[i]; + + switch (AUVI_INPUT(i).type) { + case AU0828_VMUX_UNDEFINED: + break; + case AU0828_VMUX_CABLE: + case AU0828_VMUX_TELEVISION: + case AU0828_VMUX_DVB: + if (!tuner) + break; + + ret = media_create_pad_link(ent, 0, tuner, + TUNER_PAD_RF_INPUT, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + break; + case AU0828_VMUX_COMPOSITE: + case AU0828_VMUX_SVIDEO: + /* FIXME: fix the decoder PAD */ + ret = media_create_pad_link(ent, 0, decoder, 0, 0); + if (ret) + return ret; + break; + } + } +#endif + return 0; +} + +static void au0828_usb_v4l2_release(struct v4l2_device *v4l2_dev) +{ + struct au0828_dev *dev = + container_of(v4l2_dev, struct au0828_dev, v4l2_dev); + + v4l2_ctrl_handler_free(&dev->v4l2_ctrl_hdl); + v4l2_device_unregister(&dev->v4l2_dev); + au0828_usb_v4l2_media_release(dev); + au0828_usb_release(dev); +} + +int au0828_v4l2_device_register(struct usb_interface *interface, + struct au0828_dev *dev) +{ + int retval; + + if (AUVI_INPUT(0).type == AU0828_VMUX_UNDEFINED) + return 0; + + /* Create the v4l2_device */ +#ifdef CONFIG_MEDIA_CONTROLLER + dev->v4l2_dev.mdev = dev->media_dev; +#endif + retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev); + if (retval) { + pr_err("%s() v4l2_device_register failed\n", + __func__); + mutex_unlock(&dev->lock); + kfree(dev); + return retval; + } + + dev->v4l2_dev.release = au0828_usb_v4l2_release; + + /* This control handler will inherit the controls from au8522 */ + retval = v4l2_ctrl_handler_init(&dev->v4l2_ctrl_hdl, 4); + if (retval) { + pr_err("%s() v4l2_ctrl_handler_init failed\n", + __func__); + mutex_unlock(&dev->lock); + kfree(dev); + return retval; + } + dev->v4l2_dev.ctrl_handler = &dev->v4l2_ctrl_hdl; + + return 0; +} + static int au0828_enable_analog_tuner(struct au0828_dev *dev) { #ifdef CONFIG_MEDIA_CONTROLLER @@ -949,13 +1085,23 @@ static struct vb2_ops au0828_video_qops = { * au0828_analog_unregister * unregister v4l2 devices */ -void au0828_analog_unregister(struct au0828_dev *dev) +int au0828_analog_unregister(struct au0828_dev *dev) { dprintk(1, "au0828_analog_unregister called\n"); + + /* No analog TV */ + if (AUVI_INPUT(0).type == AU0828_VMUX_UNDEFINED) + return 0; + mutex_lock(&au0828_sysfs_lock); video_unregister_device(&dev->vdev); video_unregister_device(&dev->vbi_dev); mutex_unlock(&au0828_sysfs_lock); + + v4l2_device_disconnect(&dev->v4l2_dev); + v4l2_device_put(&dev->v4l2_dev); + + return 1; } /* This function ensures that video frames continue to be delivered even if @@ -1312,7 +1458,6 @@ static int vidioc_enum_input(struct file *file, void *priv, [AU0828_VMUX_CABLE] = "Cable TV", [AU0828_VMUX_TELEVISION] = "Television", [AU0828_VMUX_DVB] = "DVB", - [AU0828_VMUX_DEBUG] = "tv debug" }; dprintk(1, "%s called std_set %d dev_state %d\n", __func__, @@ -1804,7 +1949,6 @@ static void au0828_analog_create_entities(struct au0828_dev *dev) [AU0828_VMUX_CABLE] = "Cable TV", [AU0828_VMUX_TELEVISION] = "Television", [AU0828_VMUX_DVB] = "DVB", - [AU0828_VMUX_DEBUG] = "tv debug" }; int ret, i; @@ -1840,11 +1984,9 @@ static void 
au0828_analog_create_entities(struct au0828_dev *dev) case AU0828_VMUX_CABLE: case AU0828_VMUX_TELEVISION: case AU0828_VMUX_DVB: + default: /* Just to shut up a warning */ ent->function = MEDIA_ENT_F_CONN_RF; break; - default: /* AU0828_VMUX_DEBUG */ - ent->function = MEDIA_ENT_F_CONN_TEST; - break; } ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]); @@ -1871,6 +2013,10 @@ int au0828_analog_register(struct au0828_dev *dev, dprintk(1, "au0828_analog_register called for intf#%d!\n", interface->cur_altsetting->desc.bInterfaceNumber); + /* No analog TV */ + if (AUVI_INPUT(0).type == AU0828_VMUX_UNDEFINED) + return 0; + /* set au0828 usb interface0 to as5 */ retval = usb_set_interface(dev->usbdev, interface->cur_altsetting->desc.bInterfaceNumber, 5); @@ -1976,6 +2122,14 @@ int au0828_analog_register(struct au0828_dev *dev, ret = -ENODEV; goto err_reg_vbi_dev; } + retval = au0828_create_media_graph(dev); + if (retval) { + pr_err("%s() au0282_dev_register failed to create graph\n", + __func__); + ret = -ENODEV; + goto err_reg_vbi_dev; + } + dprintk(1, "%s completed!\n", __func__); diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h index 8276072bc55a..23f869cf11da 100644 --- a/drivers/media/usb/au0828/au0828.h +++ b/drivers/media/usb/au0828/au0828.h @@ -76,7 +76,6 @@ enum au0828_itype { AU0828_VMUX_CABLE, AU0828_VMUX_TELEVISION, AU0828_VMUX_DVB, - AU0828_VMUX_DEBUG }; struct au0828_input { @@ -301,6 +300,7 @@ struct au0828_dev { /* au0828-core.c */ extern u32 au0828_read(struct au0828_dev *dev, u16 reg); extern u32 au0828_write(struct au0828_dev *dev, u16 reg, u32 val); +extern void au0828_usb_release(struct au0828_dev *dev); extern int au0828_debug; /* ----------------------------------------------------------- */ @@ -319,16 +319,29 @@ extern int au0828_i2c_unregister(struct au0828_dev *dev); /* ----------------------------------------------------------- */ /* au0828-video.c */ -extern int au0828_analog_register(struct au0828_dev *dev, - struct usb_interface *interface); -extern void au0828_analog_unregister(struct au0828_dev *dev); extern int au0828_start_analog_streaming(struct vb2_queue *vq, unsigned int count); extern void au0828_stop_vbi_streaming(struct vb2_queue *vq); #ifdef CONFIG_VIDEO_AU0828_V4L2 +extern int au0828_v4l2_device_register(struct usb_interface *interface, + struct au0828_dev *dev); + +extern int au0828_analog_register(struct au0828_dev *dev, + struct usb_interface *interface); +extern int au0828_analog_unregister(struct au0828_dev *dev); +extern void au0828_usb_v4l2_media_release(struct au0828_dev *dev); extern void au0828_v4l2_suspend(struct au0828_dev *dev); extern void au0828_v4l2_resume(struct au0828_dev *dev); #else +static inline int au0828_v4l2_device_register(struct usb_interface *interface, + struct au0828_dev *dev) +{ return 0; }; +static inline int au0828_analog_register(struct au0828_dev *dev, + struct usb_interface *interface) +{ return 0; }; +static inline int au0828_analog_unregister(struct au0828_dev *dev) +{ return 0; }; +static inline void au0828_usb_v4l2_media_release(struct au0828_dev *dev) { }; static inline void au0828_v4l2_suspend(struct au0828_dev *dev) { }; static inline void au0828_v4l2_resume(struct au0828_dev *dev) { }; #endif diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index 0bd969063392..d4bdba60b0f7 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -10,7 +10,7 @@ /* Version information */ #define DRIVER_VERSION 
"0.1" #define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV USB Driver" -#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>" +#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>" /* debug */ #ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c index 187012ce444b..0310fd6ed103 100644 --- a/drivers/media/usb/cpia2/cpia2_core.c +++ b/drivers/media/usb/cpia2/cpia2_core.c @@ -923,7 +923,7 @@ static int apply_vp_patch(struct camera_data *cam) /* ... followed by the data payload */ for (i = 2; i < fw->size; i += 64) { cmd.start = 0x0C; /* Data */ - cmd.reg_count = min_t(int, 64, fw->size - i); + cmd.reg_count = min_t(uint, 64, fw->size - i); memcpy(cmd.buffer.block_data, &fw->data[i], cmd.reg_count); cpia2_send_command(cam, &cmd); } diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c index 48643b94e694..c9320d6c6131 100644 --- a/drivers/media/usb/cx231xx/cx231xx-417.c +++ b/drivers/media/usb/cx231xx/cx231xx-417.c @@ -1382,6 +1382,8 @@ static int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb) buffer_size = urb->actual_length; buffer = kmalloc(buffer_size, GFP_ATOMIC); + if (!buffer) + return -ENOMEM; memcpy(buffer, dma_q->ps_head, 3); memcpy(buffer+3, p_buffer, buffer_size-3); diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c index de4ae5eb4830..a6a9508418f8 100644 --- a/drivers/media/usb/cx231xx/cx231xx-audio.c +++ b/drivers/media/usb/cx231xx/cx231xx-audio.c @@ -499,6 +499,11 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream) } dev->adev.users--; + if (substream->runtime->dma_area) { + dev_dbg(dev->dev, "freeing\n"); + vfree(substream->runtime->dma_area); + substream->runtime->dma_area = NULL; + } mutex_unlock(&dev->lock); if (dev->adev.users == 0 && dev->adev.shutdown == 1) { diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index 620b83d03f75..9e3a5d2038c2 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -1212,70 +1212,15 @@ static int cx231xx_media_device_init(struct cx231xx *dev, #ifdef CONFIG_MEDIA_CONTROLLER struct media_device *mdev; - mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + mdev = v4l2_mc_usb_media_device_init(udev, dev->board.name); if (!mdev) return -ENOMEM; - mdev->dev = dev->dev; - strlcpy(mdev->model, dev->board.name, sizeof(mdev->model)); - if (udev->serial) - strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial)); - strcpy(mdev->bus_info, udev->devpath); - mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); - mdev->driver_version = LINUX_VERSION_CODE; - - media_device_init(mdev); - dev->media_dev = mdev; #endif return 0; } -static int cx231xx_create_media_graph(struct cx231xx *dev) -{ -#ifdef CONFIG_MEDIA_CONTROLLER - struct media_device *mdev = dev->media_dev; - struct media_entity *entity; - struct media_entity *tuner = NULL, *decoder = NULL; - int ret; - - if (!mdev) - return 0; - - media_device_for_each_entity(entity, mdev) { - switch (entity->function) { - case MEDIA_ENT_F_TUNER: - tuner = entity; - break; - case MEDIA_ENT_F_ATV_DECODER: - decoder = entity; - break; - } - } - - /* Analog setup, using tuner as a link */ - - if (!decoder) - return 0; - - if (tuner) { - ret = media_create_pad_link(tuner, TUNER_PAD_IF_OUTPUT, decoder, 0, - MEDIA_LNK_FL_ENABLED); - if (ret < 0) - return ret; - } - ret = 
media_create_pad_link(decoder, 1, &dev->vdev.entity, 0, - MEDIA_LNK_FL_ENABLED); - if (ret < 0) - return ret; - ret = media_create_pad_link(decoder, 2, &dev->vbi_dev.entity, 0, - MEDIA_LNK_FL_ENABLED); - if (ret < 0) - return ret; -#endif - return 0; -} - /* * cx231xx_init_dev() * allocates and inits the device structs, registers i2c bus and v4l device @@ -1739,15 +1684,14 @@ static int cx231xx_usb_probe(struct usb_interface *interface, /* load other modules required */ request_modules(dev); - retval = cx231xx_create_media_graph(dev); - if (retval < 0) - goto done; - #ifdef CONFIG_MEDIA_CONTROLLER - retval = media_device_register(dev->media_dev); -#endif + /* Init entities at the Media Controller */ + cx231xx_v4l2_create_entities(dev); -done: + retval = v4l2_mc_create_media_graph(dev->media_dev); + if (!retval) + retval = media_device_register(dev->media_dev); +#endif if (retval < 0) cx231xx_release_resources(dev); return retval; diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c index b8d5b2be9293..ab2fb9fa0cd1 100644 --- a/drivers/media/usb/cx231xx/cx231xx-dvb.c +++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c @@ -25,6 +25,7 @@ #include <media/v4l2-common.h> #include <media/videobuf-vmalloc.h> +#include <media/tuner.h> #include "xc5000.h" #include "s5h1432.h" @@ -551,7 +552,8 @@ static int register_dvb(struct cx231xx_dvb *dvb, /* register network adapter */ dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx); - result = dvb_create_media_graph(&dvb->adapter, false); + result = dvb_create_media_graph(&dvb->adapter, + dev->tuner_type == TUNER_ABSENT); if (result < 0) goto fail_create_graph; @@ -801,6 +803,9 @@ static int dvb_init(struct cx231xx *dev) /* attach tuner */ memset(&si2157_config, 0, sizeof(si2157_config)); si2157_config.fe = dev->dvb->frontend; +#ifdef CONFIG_MEDIA_CONTROLLER_DVB + si2157_config.mdev = dev->media_dev; +#endif si2157_config.if_port = 1; si2157_config.inversion = true; strlcpy(info.type, "si2157", I2C_NAME_SIZE); @@ -857,6 +862,9 @@ static int dvb_init(struct cx231xx *dev) /* attach tuner */ memset(&si2157_config, 0, sizeof(si2157_config)); si2157_config.fe = dev->dvb->frontend; +#ifdef CONFIG_MEDIA_CONTROLLER_DVB + si2157_config.mdev = dev->media_dev; +#endif si2157_config.if_port = 1; si2157_config.inversion = true; strlcpy(info.type, "si2157", I2C_NAME_SIZE); diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c index 9b88cd8127ac..6414188ffdfa 100644 --- a/drivers/media/usb/cx231xx/cx231xx-video.c +++ b/drivers/media/usb/cx231xx/cx231xx-video.c @@ -1103,9 +1103,54 @@ static const char *iname[] = { [CX231XX_VMUX_TELEVISION] = "Television", [CX231XX_VMUX_CABLE] = "Cable TV", [CX231XX_VMUX_DVB] = "DVB", - [CX231XX_VMUX_DEBUG] = "for debug only", }; +void cx231xx_v4l2_create_entities(struct cx231xx *dev) +{ +#if defined(CONFIG_MEDIA_CONTROLLER) + int ret, i; + + /* Create entities for each input connector */ + for (i = 0; i < MAX_CX231XX_INPUT; i++) { + struct media_entity *ent = &dev->input_ent[i]; + + if (!INPUT(i)->type) + break; + + ent->name = iname[INPUT(i)->type]; + ent->flags = MEDIA_ENT_FL_CONNECTOR; + dev->input_pad[i].flags = MEDIA_PAD_FL_SOURCE; + + switch (INPUT(i)->type) { + case CX231XX_VMUX_COMPOSITE1: + ent->function = MEDIA_ENT_F_CONN_COMPOSITE; + break; + case CX231XX_VMUX_SVIDEO: + ent->function = MEDIA_ENT_F_CONN_SVIDEO; + break; + case CX231XX_VMUX_TELEVISION: + case CX231XX_VMUX_CABLE: + case CX231XX_VMUX_DVB: + /* The DVB core will handle it */ + if 
(dev->tuner_type == TUNER_ABSENT)
+ continue;
+ /* fall through */
+ default: /* just to shut up a gcc warning */
+ ent->function = MEDIA_ENT_F_CONN_RF;
+ break;
+ }
+
+ ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
+ if (ret < 0)
+ pr_err("failed to initialize input pad[%d]!\n", i);
+
+ ret = media_device_register_entity(dev->media_dev, ent);
+ if (ret < 0)
+ pr_err("failed to register input entity %d!\n", i);
+ }
+#endif
+}
+
 int cx231xx_enum_input(struct file *file, void *priv,
 struct v4l2_input *i)
 {
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index ec6d3f5bc36d..69f6d20870f5 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -281,7 +281,6 @@ enum cx231xx_itype {
 CX231XX_VMUX_CABLE,
 CX231XX_RADIO,
 CX231XX_VMUX_DVB,
- CX231XX_VMUX_DEBUG
 };
 enum cx231xx_v_input {
@@ -663,6 +662,8 @@ struct cx231xx {
 #if defined(CONFIG_MEDIA_CONTROLLER)
 struct media_device *media_dev;
 struct media_pad video_pad, vbi_pad;
+ struct media_entity input_ent[MAX_CX231XX_INPUT];
+ struct media_pad input_pad[MAX_CX231XX_INPUT];
 #endif
 unsigned char eedata[256];
@@ -943,6 +944,7 @@ int cx231xx_register_extension(struct cx231xx_ops *dev);
 void cx231xx_unregister_extension(struct cx231xx_ops *dev);
 void cx231xx_init_extension(struct cx231xx *dev);
 void cx231xx_close_extension(struct cx231xx *dev);
+void cx231xx_v4l2_create_entities(struct cx231xx *dev);
 int cx231xx_querycap(struct file *file, void *priv,
 struct v4l2_capability *cap);
 int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 6e02a15d39ce..b3c09fe54d9b 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -684,7 +684,7 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
 if (ret < 0)
 goto err;
- if (tmp == 1 || tmp == 3) {
+ if (tmp == 1 || tmp == 3 || tmp == 5) {
 /* configure gpioh1, reset & power slave demod */
 ret = af9035_wr_reg_mask(d, 0x00d8b0, 0x01, 0x01);
 if (ret < 0)
@@ -823,7 +823,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
 if (ret < 0)
 goto err;
- if (tmp == 1 || tmp == 3)
+ if (tmp == 1 || tmp == 3 || tmp == 5)
 state->dual_mode = true;
 dev_dbg(&d->udev->dev, "%s: ts mode=%d dual mode=%d\n", __func__,
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.h b/drivers/media/usb/dvb-usb-v2/af9035.h
index 416a97f05ec8..df22001f9e41 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.h
+++ b/drivers/media/usb/dvb-usb-v2/af9035.h
@@ -112,9 +112,10 @@ static const u32 clock_lut_it9135[] = {
 * 0 TS
 * 1 DCA + PIP
 * 3 PIP
+ * 5 DCA + PIP
 * n DCA
 *
- * Values 0 and 3 are seen to this day. 0 for single TS and 3 for dual TS.
+ * Values 0, 3 and 5 are seen to this day. 0 for single TS and 3/5 for dual TS.
*/ #define EEPROM_BASE_AF9035 0x42fd diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h index 023d91f7e654..35f27e2e4e28 100644 --- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h +++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h @@ -1,7 +1,7 @@ /* * DVB USB framework * - * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de> + * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de> * Copyright (C) 2012 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_common.h b/drivers/media/usb/dvb-usb-v2/dvb_usb_common.h index 45f07090d431..a1622bda2a5e 100644 --- a/drivers/media/usb/dvb-usb-v2/dvb_usb_common.h +++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_common.h @@ -1,7 +1,7 @@ /* * DVB USB framework * - * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de> + * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de> * Copyright (C) 2012 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c index f0565bf3673e..4a8769781cea 100644 --- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c +++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c @@ -1,7 +1,7 @@ /* * DVB USB framework * - * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de> + * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de> * Copyright (C) 2012 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify @@ -20,6 +20,7 @@ */ #include "dvb_usb_common.h" +#include <media/v4l2-mc.h> static int dvb_usbv2_disable_rc_polling; module_param_named(disable_rc_polling, dvb_usbv2_disable_rc_polling, int, 0644); @@ -407,20 +408,10 @@ static int dvb_usbv2_media_device_init(struct dvb_usb_adapter *adap) struct dvb_usb_device *d = adap_to_d(adap); struct usb_device *udev = d->udev; - mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + mdev = v4l2_mc_usb_media_device_init(udev, d->name); if (!mdev) return -ENOMEM; - mdev->dev = &udev->dev; - strlcpy(mdev->model, d->name, sizeof(mdev->model)); - if (udev->serial) - strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial)); - strcpy(mdev->bus_info, udev->devpath); - mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); - mdev->driver_version = LINUX_VERSION_CODE; - - media_device_init(mdev); - dvb_register_media_controller(&adap->dvb_adap, mdev); dev_info(&d->udev->dev, "media controller created\n"); @@ -1129,7 +1120,7 @@ int dvb_usbv2_reset_resume(struct usb_interface *intf) EXPORT_SYMBOL(dvb_usbv2_reset_resume); MODULE_VERSION("2.0"); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("DVB USB common"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c index 22bdce15ecf3..5bafeb6486be 100644 --- a/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c +++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c @@ -1,7 +1,7 @@ /* * DVB USB framework * - * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de> + * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de> * Copyright (C) 2012 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or 
modify diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c index 1dd962535f97..02dbc6c45423 100644 --- a/drivers/media/usb/dvb-usb-v2/dvbsky.c +++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c @@ -847,10 +847,17 @@ static const struct usb_device_id dvbsky_id_table[] = { USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI, &dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI", RC_MAP_TT_1500) }, + { DVB_USB_DEVICE(USB_VID_TECHNOTREND, + USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2, + &dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI v1.1", + RC_MAP_TT_1500) }, { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_H7_3, &dvbsky_t680c_props, "Terratec H7 Rev.4", RC_MAP_TT_1500) }, + { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R4, + &dvbsky_s960_props, "Terratec Cinergy S2 Rev.4", + RC_MAP_DVBSKY) }, { } }; MODULE_DEVICE_TABLE(usb, dvbsky_id_table); diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c index 84f6de6fa07d..047a32fe43ea 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c @@ -507,9 +507,9 @@ static int mxl111sf_demod_read_signal_strength(struct dvb_frontend *fe, return 0; } -static int mxl111sf_demod_get_frontend(struct dvb_frontend *fe) +static int mxl111sf_demod_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *p) { - struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct mxl111sf_demod_state *state = fe->demodulator_priv; mxl_dbg("()"); diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c index 444579be0b77..7d16252dbb71 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c @@ -36,7 +36,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))."); struct mxl111sf_tuner_state { struct mxl111sf_state *mxl_state; - struct mxl111sf_tuner_config *cfg; + const struct mxl111sf_tuner_config *cfg; enum mxl_if_freq if_freq; @@ -489,8 +489,8 @@ static struct dvb_tuner_ops mxl111sf_tuner_tuner_ops = { }; struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe, - struct mxl111sf_state *mxl_state, - struct mxl111sf_tuner_config *cfg) + struct mxl111sf_state *mxl_state, + const struct mxl111sf_tuner_config *cfg) { struct mxl111sf_tuner_state *state = NULL; diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h index e6caab21a197..509b55071218 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h @@ -63,13 +63,13 @@ struct mxl111sf_tuner_config { #if IS_ENABLED(CONFIG_DVB_USB_MXL111SF) extern struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe, - struct mxl111sf_state *mxl_state, - struct mxl111sf_tuner_config *cfg); + struct mxl111sf_state *mxl_state, + const struct mxl111sf_tuner_config *cfg); #else static inline struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe, - struct mxl111sf_state *mxl_state, - struct mxl111sf_tuner_config *cfg) + struct mxl111sf_state *mxl_state, + const struct mxl111sf_tuner_config *cfg) { printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); return NULL; diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c index b669deccc34c..5d676b533a3a 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c @@ -856,7 +856,7 
@@ static int mxl111sf_ant_hunt(struct dvb_frontend *fe) return 0; } -static struct mxl111sf_tuner_config mxl_tuner_config = { +static const struct mxl111sf_tuner_config mxl_tuner_config = { .if_freq = MXL_IF_6_0, /* applies to external IF output, only */ .invert_spectrum = 0, .read_reg = mxl111sf_read_reg, @@ -888,7 +888,7 @@ static int mxl111sf_attach_tuner(struct dvb_usb_adapter *adap) state->tuner.function = MEDIA_ENT_F_TUNER; state->tuner.name = "mxl111sf tuner"; state->tuner_pads[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK; - state->tuner_pads[TUNER_PAD_IF_OUTPUT].flags = MEDIA_PAD_FL_SOURCE; + state->tuner_pads[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&state->tuner, TUNER_NUM_PADS, state->tuner_pads); diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index eb5787a3191e..c4c6e92e8643 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -259,6 +259,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], ret = -EOPNOTSUPP; } + /* Retry failed I2C messages */ + if (ret == -EPIPE) + ret = -EAGAIN; + err_mutex_unlock: mutex_unlock(&d->i2c_mutex); @@ -619,6 +623,10 @@ static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name) } dev_dbg(&d->intf->dev, "chip_id=%u\n", dev->chip_id); + /* Retry failed I2C messages */ + d->i2c_adap.retries = 1; + d->i2c_adap.timeout = msecs_to_jiffies(10); + return WARM; err: dev_dbg(&d->intf->dev, "failed=%d\n", ret); diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c index ca8f3c2b1082..55136cde38f5 100644 --- a/drivers/media/usb/dvb-usb-v2/usb_urb.c +++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c @@ -1,6 +1,6 @@ /* usb-urb.c is part of the DVB USB library. * - * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * This file keeps functions for initializing and handling the diff --git a/drivers/media/usb/dvb-usb/a800.c b/drivers/media/usb/dvb-usb/a800.c index 83684ed023cd..7ba975bea96a 100644 --- a/drivers/media/usb/dvb-usb/a800.c +++ b/drivers/media/usb/dvb-usb/a800.c @@ -1,7 +1,7 @@ /* DVB USB framework compliant Linux driver for the AVerMedia AverTV DVB-T * USB2.0 (A800) DVB-T receiver. 
* - * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de) * * Thanks to * - AVerMedia who kindly provided information and @@ -185,7 +185,7 @@ static struct usb_driver a800_driver = { module_usb_driver(a800_driver); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("AVerMedia AverTV DVB-T USB 2.0 (A800)"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb/af9005-fe.c b/drivers/media/usb/dvb-usb/af9005-fe.c index ac97075d75f7..09db3d02bd82 100644 --- a/drivers/media/usb/dvb-usb/af9005-fe.c +++ b/drivers/media/usb/dvb-usb/af9005-fe.c @@ -1227,9 +1227,9 @@ static int af9005_fe_set_frontend(struct dvb_frontend *fe) return 0; } -static int af9005_fe_get_frontend(struct dvb_frontend *fe) +static int af9005_fe_get_frontend(struct dvb_frontend *fe, + struct dtv_frontend_properties *fep) { - struct dtv_frontend_properties *fep = &fe->dtv_property_cache; struct af9005_fe_state *state = fe->demodulator_priv; int ret; u8 temp; diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index ab7151181728..907ac01ae297 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -13,7 +13,7 @@ * * TODO: Use the cx25840-driver for the analogue part * - * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de) * Copyright (C) 2006 Michael Krufky (mkrufky@linuxtv.org) * Copyright (C) 2006, 2007 Chris Pascoe (c.pascoe@itee.uq.edu.au) * @@ -2314,7 +2314,7 @@ static struct usb_driver cxusb_driver = { module_usb_driver(cxusb_driver); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>"); MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>"); MODULE_DESCRIPTION("Driver for Conexant USB2.0 hybrid reference design"); diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c index 0d248ce02a9b..c16f999b9d7c 100644 --- a/drivers/media/usb/dvb-usb/dib0700_core.c +++ b/drivers/media/usb/dvb-usb/dib0700_core.c @@ -881,7 +881,7 @@ static struct usb_driver dib0700_driver = { module_usb_driver(dib0700_driver); MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw"); -MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 7ed49646a699..ea0391e32d23 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -1736,8 +1736,13 @@ static int dib809x_tuner_attach(struct dvb_usb_adapter *adap) struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib8000_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); - if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL) - return -ENODEV; + if (adap->id == 0) { + if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL) + return -ENODEV; + } else { + if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, 
tun_i2c, &dib809x_dib0090_config) == NULL) + return -ENODEV; + } st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096_set_param_override; @@ -1773,6 +1778,20 @@ static int stk809x_frontend_attach(struct dvb_usb_adapter *adap) return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } +static int stk809x_frontend1_attach(struct dvb_usb_adapter *adap) +{ + struct dib0700_adapter_state *state = adap->priv; + + if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) + return -ENODEV; + + state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, 0x82, 0); + + adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x82, &dib809x_dib8000_config[1]); + + return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; +} + static int nim8096md_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; @@ -3794,6 +3813,7 @@ struct usb_device_id dib0700_usb_id_table[] = { /* 80 */{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DTT_2) }, { USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_2002E) }, { USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_2002E_SE) }, + { USB_DEVICE(USB_VID_PCTV, USB_PID_DIBCOM_STK8096PVR) }, { 0 } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table); @@ -4959,6 +4979,59 @@ struct dvb_usb_device_properties dib0700_devices[] = { RC_BIT_NEC, .change_protocol = dib0700_change_protocol, }, + }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, + .num_adapters = 2, + .adapter = { + { + .num_frontends = 1, + .fe = {{ + .caps = DVB_USB_ADAP_HAS_PID_FILTER | + DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, + .pid_filter_count = 32, + .pid_filter = stk80xx_pid_filter, + .pid_filter_ctrl = stk80xx_pid_filter_ctrl, + .frontend_attach = stk809x_frontend_attach, + .tuner_attach = dib809x_tuner_attach, + + DIB0700_DEFAULT_STREAMING_CONFIG(0x02), + } }, + .size_of_priv = + sizeof(struct dib0700_adapter_state), + }, { + .num_frontends = 1, + .fe = { { + .caps = DVB_USB_ADAP_HAS_PID_FILTER | + DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, + .pid_filter_count = 32, + .pid_filter = stk80xx_pid_filter, + .pid_filter_ctrl = stk80xx_pid_filter_ctrl, + .frontend_attach = stk809x_frontend1_attach, + .tuner_attach = dib809x_tuner_attach, + + DIB0700_DEFAULT_STREAMING_CONFIG(0x03), + } }, + .size_of_priv = + sizeof(struct dib0700_adapter_state), + }, + }, + .num_device_descs = 1, + .devices = { + { "DiBcom STK8096-PVR reference design", + { &dib0700_usb_id_table[83], NULL }, + { NULL }, + }, + }, + + .rc.core = { + .rc_interval = DEFAULT_RC_INTERVAL, + .rc_codes = RC_MAP_DIB0700_RC5_TABLE, + .module_name = "dib0700", + .rc_query = dib0700_rc_query_old_firmware, + .allowed_protos = RC_BIT_RC5 | + RC_BIT_RC6_MCE | + RC_BIT_NEC, + .change_protocol = dib0700_change_protocol, + }, }, }; diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c index ef3a8f75f82e..35de6095926d 100644 --- a/drivers/media/usb/dvb-usb/dibusb-common.c +++ b/drivers/media/usb/dvb-usb/dibusb-common.c @@ -1,6 +1,6 @@ /* Common methods for dibusb-based-receivers. 
* - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c index a4ac37e0e98b..a0057641cc86 100644 --- a/drivers/media/usb/dvb-usb/dibusb-mb.c +++ b/drivers/media/usb/dvb-usb/dibusb-mb.c @@ -1,10 +1,10 @@ /* DVB USB compliant linux driver for mobile DVB-T USB devices based on * reference designs made by DiBcom (http://www.dibcom.fr/) (DiB3000M-B) * - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * based on GPL code from DiBcom, which has - * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr) + * Copyright (C) 2004 Amaury Demol for DiBcom * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -465,7 +465,7 @@ static struct usb_driver dibusb_driver = { module_usb_driver(dibusb_driver); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for DiBcom USB DVB-T devices (DiB3000M-B based)"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb/dibusb-mc.c b/drivers/media/usb/dvb-usb/dibusb-mc.c index 9d1a59d09c52..08fb8a3f6e0c 100644 --- a/drivers/media/usb/dvb-usb/dibusb-mc.c +++ b/drivers/media/usb/dvb-usb/dibusb-mc.c @@ -1,10 +1,10 @@ /* DVB USB compliant linux driver for mobile DVB-T USB devices based on * reference designs made by DiBcom (http://www.dibcom.fr/) (DiB3000M-C/P) * - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * based on GPL code from DiBcom, which has - * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr) + * Copyright (C) 2004 Amaury Demol for DiBcom * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -143,7 +143,7 @@ static struct usb_driver dibusb_mc_driver = { module_usb_driver(dibusb_mc_driver); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for DiBcom USB2.0 DVB-T (DiB3000M-C/P based) devices"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb/dibusb.h b/drivers/media/usb/dvb-usb/dibusb.h index 32ab1392313f..3f82163d8ab8 100644 --- a/drivers/media/usb/dvb-usb/dibusb.h +++ b/drivers/media/usb/dvb-usb/dibusb.h @@ -1,6 +1,6 @@ /* Header file for all dibusb-based-receivers. 
* - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c index 772bde3c5020..63134335c994 100644 --- a/drivers/media/usb/dvb-usb/digitv.c +++ b/drivers/media/usb/dvb-usb/digitv.c @@ -1,7 +1,7 @@ /* DVB USB compliant linux driver for Nebula Electronics uDigiTV DVB-T USB2.0 * receiver * - * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de) * * partly based on the SDK published by Nebula Electronics * @@ -348,7 +348,7 @@ static struct usb_driver digitv_driver = { module_usb_driver(digitv_driver); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for Nebula Electronics uDigiTV DVB-T USB2.0"); MODULE_VERSION("1.0-alpha"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c index 8637ad1be6be..c09332bd99cb 100644 --- a/drivers/media/usb/dvb-usb/dtt200u-fe.c +++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c @@ -1,7 +1,7 @@ /* Frontend part of the Linux driver for the WideView/ Yakumo/ Hama/ * Typhoon/ Yuan DVB-T USB2.0 receiver. * - * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@desy.de> + * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -140,10 +140,11 @@ static int dtt200u_fe_set_frontend(struct dvb_frontend *fe) return 0; } -static int dtt200u_fe_get_frontend(struct dvb_frontend* fe) +static int dtt200u_fe_get_frontend(struct dvb_frontend* fe, + struct dtv_frontend_properties *fep) { - struct dtv_frontend_properties *fep = &fe->dtv_property_cache; struct dtt200u_fe_state *state = fe->demodulator_priv; + memcpy(fep, &state->fep, sizeof(struct dtv_frontend_properties)); return 0; } diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c index c357fb3b0a88..ca3b69aa9688 100644 --- a/drivers/media/usb/dvb-usb/dtt200u.c +++ b/drivers/media/usb/dvb-usb/dtt200u.c @@ -1,7 +1,7 @@ /* DVB USB library compliant Linux driver for the WideView/ Yakumo/ Hama/ * Typhoon/ Yuan/ Miglia DVB-T USB2.0 receiver. * - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * Thanks to Steve Chang from WideView for providing support for the WT-220U. 
* @@ -362,7 +362,7 @@ static struct usb_driver dtt200u_usb_driver = { module_usb_driver(dtt200u_usb_driver); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for the WideView/Yakumo/Hama/Typhoon/Club3D/Miglia DVB-T USB2.0 devices"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb/dtt200u.h b/drivers/media/usb/dvb-usb/dtt200u.h index 005b0a7df358..efccc399b1cb 100644 --- a/drivers/media/usb/dvb-usb/dtt200u.h +++ b/drivers/media/usb/dvb-usb/dtt200u.h @@ -1,7 +1,7 @@ /* Common header file of Linux driver for the WideView/ Yakumo/ Hama/ * Typhoon/ Yuan DVB-T USB2.0 receiver. * - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free diff --git a/drivers/media/usb/dvb-usb/dvb-usb-common.h b/drivers/media/usb/dvb-usb/dvb-usb-common.h index 6b7b2a89242e..7e619d638809 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-common.h +++ b/drivers/media/usb/dvb-usb/dvb-usb-common.h @@ -1,6 +1,6 @@ /* dvb-usb-common.h is part of the DVB USB library. * - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * a header file containing prototypes and types for internal use of the dvb-usb-lib diff --git a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c index 9ddfcab268be..513b0c14e4f0 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c @@ -1,12 +1,13 @@ /* dvb-usb-dvb.c is part of the DVB USB library. * - * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * This file contains functions for initializing and handling the * linux-dvb API. */ #include "dvb-usb-common.h" +#include <media/v4l2-mc.h> /* does the complete input transfer handling */ static int dvb_usb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff) @@ -102,19 +103,7 @@ static int dvb_usb_media_device_init(struct dvb_usb_adapter *adap) struct dvb_usb_device *d = adap->dev; struct usb_device *udev = d->udev; - mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); - if (!mdev) - return -ENOMEM; - - mdev->dev = &udev->dev; - strlcpy(mdev->model, d->desc->name, sizeof(mdev->model)); - if (udev->serial) - strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial)); - strcpy(mdev->bus_info, udev->devpath); - mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); - mdev->driver_version = LINUX_VERSION_CODE; - - media_device_init(mdev); + mdev = v4l2_mc_usb_media_device_init(udev, d->desc->name); dvb_register_media_controller(&adap->dvb_adap, mdev); diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c index 733a7ff7b207..dd048a7c461c 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c @@ -1,6 +1,6 @@ /* dvb-usb-firmware.c is part of the DVB USB library. 
* - * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * This file contains functions for downloading the firmware to Cypress FX 1 and 2 based devices. diff --git a/drivers/media/usb/dvb-usb/dvb-usb-i2c.c b/drivers/media/usb/dvb-usb/dvb-usb-i2c.c index 88e4a62abc44..4f0b0adce7f5 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-i2c.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-i2c.c @@ -1,6 +1,6 @@ /* dvb-usb-i2c.c is part of the DVB USB library. * - * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * This file contains functions for (de-)initializing an I2C adapter. diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c index 1adf325012f7..3896ba9a4179 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c @@ -3,7 +3,7 @@ * * dvb-usb-init.c * - * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -299,6 +299,6 @@ void dvb_usb_device_exit(struct usb_interface *intf) EXPORT_SYMBOL(dvb_usb_device_exit); MODULE_VERSION("1.0"); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("A library module containing commonly used USB and DVB function USB DVB devices"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb/dvb-usb-remote.c b/drivers/media/usb/dvb-usb/dvb-usb-remote.c index 7b5dae3077f6..c259f9e43542 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-remote.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-remote.c @@ -1,6 +1,6 @@ /* dvb-usb-remote.c is part of the DVB USB library. * - * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * This file contains functions for initializing the input-device and for handling remote-control-queries. diff --git a/drivers/media/usb/dvb-usb/dvb-usb-urb.c b/drivers/media/usb/dvb-usb/dvb-usb-urb.c index 5c8f651344fc..95f9097498cb 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-urb.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-urb.c @@ -1,6 +1,6 @@ /* dvb-usb-urb.c is part of the DVB USB library. * - * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * This file keeps functions for initializing and handling the diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h index ce4c4e3b58bb..639c4678c65b 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb.h +++ b/drivers/media/usb/dvb-usb/dvb-usb.h @@ -1,6 +1,6 @@ /* dvb-usb.h is part of the DVB USB library. * - * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * the headerfile, all dvb-usb-drivers have to include. 
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index 14ef25dc6cd3..dd46d6c78c4e 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -1688,6 +1688,7 @@ enum dw2102_table_entry { TECHNOTREND_S2_4600, TEVII_S482_1, TEVII_S482_2, + TERRATEC_CINERGY_S2_BOX, }; static struct usb_device_id dw2102_table[] = { @@ -1702,19 +1703,20 @@ static struct usb_device_id dw2102_table[] = { [TEVII_S660] = {USB_DEVICE(0x9022, USB_PID_TEVII_S660)}, [PROF_7500] = {USB_DEVICE(0x3034, 0x7500)}, [GENIATECH_SU3000] = {USB_DEVICE(0x1f4d, 0x3000)}, - [TERRATEC_CINERGY_S2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00a8)}, + [TERRATEC_CINERGY_S2] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R1)}, [TEVII_S480_1] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_1)}, [TEVII_S480_2] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_2)}, [X3M_SPC1400HD] = {USB_DEVICE(0x1f4d, 0x3100)}, [TEVII_S421] = {USB_DEVICE(0x9022, USB_PID_TEVII_S421)}, [TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)}, - [TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00b0)}, + [TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R2)}, [GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)}, [GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)}, [TECHNOTREND_S2_4600] = {USB_DEVICE(USB_VID_TECHNOTREND, USB_PID_TECHNOTREND_CONNECT_S2_4600)}, [TEVII_S482_1] = {USB_DEVICE(0x9022, 0xd483)}, [TEVII_S482_2] = {USB_DEVICE(0x9022, 0xd484)}, + [TERRATEC_CINERGY_S2_BOX] = {USB_DEVICE(USB_VID_TERRATEC, 0x0105)}, { } }; @@ -2232,7 +2234,7 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = { } }, } }, - .num_device_descs = 3, + .num_device_descs = 4, .devices = { { "TechnoTrend TT-connect S2-4600", { &dw2102_table[TECHNOTREND_S2_4600], NULL }, @@ -2246,6 +2248,10 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = { { &dw2102_table[TEVII_S482_2], NULL }, { NULL }, }, + { "Terratec Cinergy S2 USB BOX", + { &dw2102_table[TERRATEC_CINERGY_S2_BOX], NULL }, + { NULL }, + }, } }; diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c index 8ec92fbeabad..979f05b4b87c 100644 --- a/drivers/media/usb/dvb-usb/friio-fe.c +++ b/drivers/media/usb/dvb-usb/friio-fe.c @@ -283,20 +283,6 @@ static int jdvbt90502_set_property(struct dvb_frontend *fe, return r; } -static int jdvbt90502_get_frontend(struct dvb_frontend *fe) -{ - struct dtv_frontend_properties *p = &fe->dtv_property_cache; - p->inversion = INVERSION_AUTO; - p->bandwidth_hz = 6000000; - p->code_rate_HP = FEC_AUTO; - p->code_rate_LP = FEC_AUTO; - p->modulation = QAM_64; - p->transmission_mode = TRANSMISSION_MODE_AUTO; - p->guard_interval = GUARD_INTERVAL_AUTO; - p->hierarchy = HIERARCHY_AUTO; - return 0; -} - static int jdvbt90502_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; @@ -312,8 +298,16 @@ static int jdvbt90502_set_frontend(struct dvb_frontend *fe) deb_fe("%s: Freq:%d\n", __func__, p->frequency); - /* for recovery from DTV_CLEAN */ - fe->dtv_property_cache.delivery_system = SYS_ISDBT; + /* This driver only works on auto mode */ + p->inversion = INVERSION_AUTO; + p->bandwidth_hz = 6000000; + p->code_rate_HP = FEC_AUTO; + p->code_rate_LP = FEC_AUTO; + p->modulation = QAM_64; + p->transmission_mode = TRANSMISSION_MODE_AUTO; + p->guard_interval = GUARD_INTERVAL_AUTO; + p->hierarchy = HIERARCHY_AUTO; + p->delivery_system = SYS_ISDBT; ret = jdvbt90502_pll_set_freq(state, p->frequency); if 
(ret) { @@ -466,7 +460,6 @@ static struct dvb_frontend_ops jdvbt90502_ops = { .set_property = jdvbt90502_set_property, .set_frontend = jdvbt90502_set_frontend, - .get_frontend = jdvbt90502_get_frontend, .read_status = jdvbt90502_read_status, .read_signal_strength = jdvbt90502_read_signal_strength, diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c index 6c55384e2fca..fc7569e2728d 100644 --- a/drivers/media/usb/dvb-usb/nova-t-usb2.c +++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c @@ -1,7 +1,7 @@ /* DVB USB framework compliant Linux driver for the Hauppauge WinTV-NOVA-T usb2 * DVB-T receiver. * - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -227,7 +227,7 @@ static struct usb_driver nova_t_driver = { module_usb_driver(nova_t_driver); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Hauppauge WinTV-NOVA-T usb2"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c index 6c3c47722955..51487d2f7764 100644 --- a/drivers/media/usb/dvb-usb/technisat-usb2.c +++ b/drivers/media/usb/dvb-usb/technisat-usb2.c @@ -512,7 +512,7 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a) &a->dev->i2c_adap, STV090x_DEMODULATOR_0); if (a->fe_adap[0].fe) { - struct stv6110x_devctl *ctl; + const struct stv6110x_devctl *ctl; ctl = dvb_attach(stv6110x_attach, a->fe_adap[0].fe, diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c index f10717311e05..ecc207fbaf3c 100644 --- a/drivers/media/usb/dvb-usb/ttusb2.c +++ b/drivers/media/usb/dvb-usb/ttusb2.c @@ -820,7 +820,7 @@ static struct usb_driver ttusb2_driver = { module_usb_driver(ttusb2_driver); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for Pinnacle PCTV 400e DVB-S USB2.0"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb/umt-010.c b/drivers/media/usb/dvb-usb/umt-010.c index 9b042292e788..58ad5b4f856c 100644 --- a/drivers/media/usb/dvb-usb/umt-010.c +++ b/drivers/media/usb/dvb-usb/umt-010.c @@ -1,7 +1,7 @@ /* DVB USB framework compliant Linux driver for the HanfTek UMT-010 USB2.0 * DVB-T receiver. 
* - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -145,7 +145,7 @@ static struct usb_driver umt_driver = { module_usb_driver(umt_driver); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for HanfTek UMT 010 USB2.0 DVB-T device"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb/usb-urb.c b/drivers/media/usb/dvb-usb/usb-urb.c index d62ee0f5a165..89173603be67 100644 --- a/drivers/media/usb/dvb-usb/usb-urb.c +++ b/drivers/media/usb/dvb-usb/usb-urb.c @@ -1,6 +1,6 @@ /* usb-urb.c is part of the DVB USB library. * - * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * This file keeps functions for initializing and handling the diff --git a/drivers/media/usb/dvb-usb/vp702x-fe.c b/drivers/media/usb/dvb-usb/vp702x-fe.c index d361a72ca0fa..27398c08c69d 100644 --- a/drivers/media/usb/dvb-usb/vp702x-fe.c +++ b/drivers/media/usb/dvb-usb/vp702x-fe.c @@ -4,7 +4,7 @@ * Copyright (C) 2005 Ralph Metzler <rjkm@metzlerbros.de> * Metzler Brothers Systementwicklung GbR * - * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@desy.de> + * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de> * * Thanks to Twinhan who kindly provided hardware and information. * diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c index ee1e19e36445..40de33de90a7 100644 --- a/drivers/media/usb/dvb-usb/vp702x.c +++ b/drivers/media/usb/dvb-usb/vp702x.c @@ -4,7 +4,7 @@ * Copyright (C) 2005 Ralph Metzler <rjkm@metzlerbros.de> * Metzler Brothers Systementwicklung GbR * - * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@desy.de> + * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de> * * Thanks to Twinhan who kindly provided hardware and information. * @@ -439,7 +439,7 @@ static struct usb_driver vp702x_usb_driver = { module_usb_driver(vp702x_usb_driver); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for Twinhan StarBox DVB-S USB2.0 and clones"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb/vp7045-fe.c b/drivers/media/usb/dvb-usb/vp7045-fe.c index e708afc6a57f..7765602ea658 100644 --- a/drivers/media/usb/dvb-usb/vp7045-fe.c +++ b/drivers/media/usb/dvb-usb/vp7045-fe.c @@ -1,7 +1,7 @@ /* DVB frontend part of the Linux driver for TwinhanDTV Alpha/MagicBoxII USB2.0 * DVB-T receiver. * - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * Thanks to Twinhan who kindly provided hardware and information. 
* diff --git a/drivers/media/usb/dvb-usb/vp7045.c b/drivers/media/usb/dvb-usb/vp7045.c index d750724132ee..13340af0d39c 100644 --- a/drivers/media/usb/dvb-usb/vp7045.c +++ b/drivers/media/usb/dvb-usb/vp7045.c @@ -2,7 +2,7 @@ * - TwinhanDTV Alpha/MagicBoxII USB2.0 DVB-T receiver * - DigitalNow TinyUSB2 DVB-t receiver * - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * Thanks to Twinhan who kindly provided hardware and information. * @@ -296,7 +296,7 @@ static struct usb_driver vp7045_usb_driver = { module_usb_driver(vp7045_usb_driver); -MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); +MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for Twinhan MagicBox/Alpha and DNTV tinyUSB2 DVB-T USB2.0"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/usb/dvb-usb/vp7045.h b/drivers/media/usb/dvb-usb/vp7045.h index cf5ec46f8bb1..66499932ca76 100644 --- a/drivers/media/usb/dvb-usb/vp7045.h +++ b/drivers/media/usb/dvb-usb/vp7045.h @@ -1,7 +1,7 @@ /* Common header-file of the Linux driver for the TwinhanDTV Alpha/MagicBoxII * USB2.0 DVB-T receiver. * - * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * Thanks to Twinhan who kindly provided hardware and information. * diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c index b58acd3fcd99..72f3f4d50253 100644 --- a/drivers/media/usb/em28xx/em28xx-camera.c +++ b/drivers/media/usb/em28xx/em28xx-camera.c @@ -64,6 +64,8 @@ static int em28xx_initialize_mt9m111(struct em28xx *dev) i2c_master_send(&dev->i2c_client[dev->def_i2c_bus], &regs[i][0], 3); + /* FIXME: This won't be creating a sensor at the media graph */ + return 0; } @@ -91,6 +93,8 @@ static int em28xx_initialize_mt9m001(struct em28xx *dev) i2c_master_send(&dev->i2c_client[dev->def_i2c_bus], &regs[i][0], 3); + /* FIXME: This won't be creating a sensor at the media graph */ + return 0; } diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index a1b6ef5894a6..389e95fb0211 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -32,7 +32,7 @@ #include <media/tuner.h> #include <media/drv-intf/msp3400.h> #include <media/i2c/saa7115.h> -#include <media/i2c/tvp5150.h> +#include <dt-bindings/media/tvp5150.h> #include <media/i2c/tvaudio.h> #include <media/i2c-addr.h> #include <media/tveeprom.h> @@ -570,7 +570,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, .gpio = silvercrest_reg_seq, @@ -583,7 +583,7 @@ struct em28xx_board em28xx_boards[] = { .decoder = EM28XX_SAA711X, .tuner_type = TUNER_ABSENT, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -605,7 +605,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, } }, @@ -616,7 +616,7 @@ struct em28xx_board em28xx_boards[] = { .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -635,7 +635,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_LINE_IN, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -655,7 +655,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -675,7 +675,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -715,7 +715,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_LINE_IN, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -735,7 +735,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_LINE_IN, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -755,7 +755,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -775,7 +775,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -800,7 +800,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE4, .amux = EM28XX_AMUX_AUX, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE5, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -819,7 +819,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, } }, @@ -829,7 +829,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, .gpio = silvercrest_reg_seq, @@ -848,7 +848,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_LINE_IN, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { @@ -863,7 +863,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_type = TUNER_ABSENT, /* Capture only device */ .decoder = EM28XX_SAA711X, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -879,7 +879,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, } }, @@ -889,7 +889,7 @@ struct em28xx_board em28xx_boards[] = { .decoder = EM28XX_SAA711X, .tuner_type = TUNER_ABSENT, /* Capture only device */ .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = 
EM28XX_AMUX_LINE_IN, }, { @@ -909,7 +909,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -930,7 +930,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -952,7 +952,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, @@ -974,7 +974,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -992,7 +992,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1006,7 +1006,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_type = TUNER_ABSENT, /* Capture only device */ .decoder = EM28XX_TVP5150, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1029,7 +1029,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_LINE_IN, .gpio = pinnacle_hybrid_pro_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = pinnacle_hybrid_pro_analog, @@ -1100,7 +1100,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = terratec_cinergy_USB_XS_FR_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = terratec_cinergy_USB_XS_FR_analog, @@ -1186,7 +1186,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, @@ -1213,7 +1213,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, @@ -1239,7 +1239,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, @@ -1265,7 +1265,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, @@ -1291,7 +1291,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { - .type = 
EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, @@ -1317,7 +1317,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, @@ -1343,7 +1343,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, @@ -1368,7 +1368,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, @@ -1392,7 +1392,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE4, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1413,7 +1413,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1428,7 +1428,7 @@ struct em28xx_board em28xx_boards[] = { .decoder = EM28XX_SAA711X, .tuner_type = TUNER_ABSENT, /* capture only board */ .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1443,7 +1443,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_type = TUNER_ABSENT, /* Capture-only board */ .decoder = EM28XX_SAA711X, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, .gpio = vc211a_enable, @@ -1465,7 +1465,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1485,7 +1485,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1500,7 +1500,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_type = TUNER_ABSENT, /* capture only board */ .decoder = EM28XX_SAA711X, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1520,7 +1520,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1541,7 +1541,7 @@ struct em28xx_board em28xx_boards[] = { .aout = EM28XX_AOUT_MONO | /* I2S */ EM28XX_AOUT_MASTER, /* Line out pin */ }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1555,6 +1555,7 @@ struct em28xx_board em28xx_boards[] = { .buttons = std_snapshot_button, .tda9887_conf = TDA9887_PRESENT, .tuner_type = 
TUNER_YMEC_TVF_5533MF, + .tuner_addr = 0x60, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, @@ -1563,7 +1564,7 @@ struct em28xx_board em28xx_boards[] = { .aout = EM28XX_AOUT_MONO | /* I2S */ EM28XX_AOUT_MASTER, /* Line out pin */ }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1581,7 +1582,7 @@ struct em28xx_board em28xx_boards[] = { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, } }, }, @@ -1610,7 +1611,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = em2880_msi_digivox_ad_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2880_msi_digivox_ad_analog, @@ -1633,7 +1634,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = em2880_msi_digivox_ad_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2880_msi_digivox_ad_analog, @@ -1654,7 +1655,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1677,7 +1678,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, @@ -1708,7 +1709,7 @@ struct em28xx_board em28xx_boards[] = { .gpio = em2882_kworld_315u_analog, .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2882_kworld_315u_analog1, @@ -1735,7 +1736,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, @@ -1758,7 +1759,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, @@ -1782,7 +1783,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = pinnacle_hybrid_pro_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = pinnacle_hybrid_pro_analog, @@ -1808,7 +1809,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, @@ -1834,7 +1835,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1859,7 +1860,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { - .type = 
EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, @@ -1904,7 +1905,7 @@ struct em28xx_board em28xx_boards[] = { .gpio = kworld_330u_analog, .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = kworld_330u_analog, @@ -1951,7 +1952,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1970,7 +1971,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -1990,7 +1991,7 @@ struct em28xx_board em28xx_boards[] = { .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { /* Composite has not been tested yet */ - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_VIDEO, }, { /* S-video has not been tested yet */ @@ -2006,7 +2007,7 @@ struct em28xx_board em28xx_boards[] = { .decoder = EM28XX_SAA711X, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -2023,7 +2024,7 @@ struct em28xx_board em28xx_boards[] = { .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .mute_gpio = terratec_av350_mute_gpio, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AUDIO_SRC_LINE, .gpio = terratec_av350_unmute_gpio, @@ -2041,7 +2042,7 @@ struct em28xx_board em28xx_boards[] = { .decoder = EM28XX_SAA711X, .tuner_type = TUNER_ABSENT, /* Capture only device */ .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -2067,7 +2068,7 @@ struct em28xx_board em28xx_boards[] = { .amux = EM28XX_AMUX_VIDEO, .gpio = evga_indtube_analog, }, { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = evga_indtube_analog, @@ -2125,7 +2126,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -2238,7 +2239,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .amux = EM28XX_AMUX_VIDEO, .gpio = speedlink_vad_laplace_reg_seq, } }, @@ -2272,7 +2273,7 @@ struct em28xx_board em28xx_boards[] = { .tuner_type = TUNER_ABSENT, /* Capture only device */ .decoder = EM28XX_TVP5150, .input = { { - .type = EM28XX_VMUX_COMPOSITE1, + .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { @@ -3012,6 +3013,41 @@ static void flush_request_modules(struct em28xx *dev) flush_work(&dev->request_module_wk); } +static int em28xx_media_device_init(struct em28xx *dev, + struct usb_device *udev) +{ +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_device *mdev; + + if (udev->product) { + mdev = v4l2_mc_usb_media_device_init(udev, 
udev->product); + } else if (udev->manufacturer) { + mdev = v4l2_mc_usb_media_device_init(udev, udev->manufacturer); + } else { + mdev = v4l2_mc_usb_media_device_init(udev, dev->name); + } + + if (!mdev) + return -ENOMEM; + + dev->media_dev = mdev; +#endif + return 0; +} + +static void em28xx_unregister_media_device(struct em28xx *dev) +{ + +#ifdef CONFIG_MEDIA_CONTROLLER + if (dev->media_dev) { + media_device_unregister(dev->media_dev); + media_device_cleanup(dev->media_dev); + kfree(dev->media_dev); + dev->media_dev = NULL; + } +#endif +} + /* * em28xx_release_resources() * unregisters the v4l2,i2c and usb devices @@ -3023,6 +3059,8 @@ static void em28xx_release_resources(struct em28xx *dev) mutex_lock(&dev->lock); + em28xx_unregister_media_device(dev); + if (dev->def_i2c_bus) em28xx_i2c_unregister(dev, 1); em28xx_i2c_unregister(dev, 0); @@ -3167,6 +3205,8 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev, */ snprintf(dev->name, sizeof(dev->name), "%s #%d", chip_name, dev->devno); + em28xx_media_device_init(dev, udev); + if (dev->is_audio_only) { retval = em28xx_audio_setup(dev); if (retval) @@ -3467,7 +3507,7 @@ static int em28xx_usb_probe(struct usb_interface *interface, /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); - /* allocate device struct */ + /* allocate device struct and check if the device is a webcam */ mutex_init(&dev->lock); retval = em28xx_init_dev(dev, udev, interface, nr); if (retval) { @@ -3483,6 +3523,15 @@ static int em28xx_usb_probe(struct usb_interface *interface, try_bulk = usb_xfer_mode > 0; } + /* Disable V4L2 if the device doesn't have a decoder */ + if (has_video && + dev->board.decoder == EM28XX_NODECODER && !dev->board.is_webcam) { + printk(DRIVER_NAME + ": Currently, V4L2 is not supported on this model\n"); + has_video = false; + dev->has_video = false; + } + /* Select USB transfer types to use */ if (has_video) { if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk)) @@ -3501,9 +3550,14 @@ static int em28xx_usb_probe(struct usb_interface *interface, request_modules(dev); - /* Should be the last thing to do, to avoid newer udev's to - open the device before fully initializing it + /* + * Do it at the end, to reduce dynamic configuration changes during + * the device init. Yet, as request_modules() can be async, the + * topology will likely change after the load of the em28xx subdrivers. 
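(The em28xx hunks above split media-controller support into two steps: the media_device is allocated early in em28xx_init_dev() through the new v4l2_mc_usb_media_device_init() helper, while media_device_register() is deliberately pushed to the very end of probe, after request_modules(). A rough sketch of that lifecycle for a hypothetical USB bridge driver, using only helpers visible in this patch — the foo_* names are placeholders, not part of the em28xx code:

#include <linux/slab.h>
#include <linux/usb.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mc.h>

/* Hypothetical bridge-driver state; only the fields used here are shown. */
struct foo_dev {
	struct v4l2_device v4l2_dev;
	struct media_device *media_dev;
};

static int foo_setup_media_controller(struct foo_dev *dev,
				      struct usb_device *udev)
{
#ifdef CONFIG_MEDIA_CONTROLLER
	int ret;

	/* 1) Allocate and init the media_device early in probe */
	dev->media_dev = v4l2_mc_usb_media_device_init(udev, "foo board");
	if (!dev->media_dev)
		return -ENOMEM;

	/* 2) Let the V4L2 core know about it before entities are created */
	dev->v4l2_dev.mdev = dev->media_dev;

	/* ... register video nodes, connectors and sub-devices here ... */

	/* 3) Create the default links between the registered entities */
	ret = v4l2_mc_create_media_graph(dev->media_dev);
	if (ret)
		goto err_free;

	/* 4) Register the media device last, once the topology is stable */
	ret = media_device_register(dev->media_dev);
	if (ret)
		goto err_free;

	return 0;

err_free:
	media_device_cleanup(dev->media_dev);
	kfree(dev->media_dev);
	dev->media_dev = NULL;
	return ret;
#else
	return 0;
#endif
}

The teardown path mirrors em28xx_unregister_media_device() above: media_device_unregister(), media_device_cleanup(), then kfree() of the structure.)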
*/ +#ifdef CONFIG_MEDIA_CONTROLLER + retval = media_device_register(dev->media_dev); +#endif return 0; diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c index bf5c24467c65..5d209c7c54d5 100644 --- a/drivers/media/usb/em28xx/em28xx-dvb.c +++ b/drivers/media/usb/em28xx/em28xx-dvb.c @@ -905,6 +905,7 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module, struct em28xx *dev, struct device *device) { int result; + bool create_rf_connector = false; mutex_init(&dvb->lock); @@ -916,6 +917,9 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module, dev->name, result); goto fail_adapter; } +#ifdef CONFIG_MEDIA_CONTROLLER_DVB + dvb->adapter.mdev = dev->media_dev; +#endif /* Ensure all frontends negotiate bus access */ dvb->fe[0]->ops.ts_bus_ctrl = em28xx_dvb_bus_ctrl; @@ -994,8 +998,19 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module, /* register network adapter */ dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx); + + /* If the analog part won't create RF connectors, DVB will do it */ + if (!dev->has_video || (dev->tuner_type == TUNER_ABSENT)) + create_rf_connector = true; + + result = dvb_create_media_graph(&dvb->adapter, create_rf_connector); + if (result < 0) + goto fail_create_graph; + return 0; +fail_create_graph: + dvb_net_release(&dvb->net); fail_fe_conn: dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem); fail_fe_mem: @@ -1656,6 +1671,9 @@ static int em28xx_dvb_init(struct em28xx *dev) memset(&si2157_config, 0, sizeof(si2157_config)); si2157_config.fe = dvb->fe[0]; si2157_config.if_port = 1; +#ifdef CONFIG_MEDIA_CONTROLLER_DVB + si2157_config.mdev = dev->media_dev; +#endif memset(&info, 0, sizeof(struct i2c_board_info)); strlcpy(info.type, "si2157", I2C_NAME_SIZE); info.addr = 0x60; @@ -1717,6 +1735,9 @@ static int em28xx_dvb_init(struct em28xx *dev) memset(&si2157_config, 0, sizeof(si2157_config)); si2157_config.fe = dvb->fe[0]; si2157_config.if_port = 0; +#ifdef CONFIG_MEDIA_CONTROLLER_DVB + si2157_config.mdev = dev->media_dev; +#endif memset(&info, 0, sizeof(struct i2c_board_info)); strlcpy(info.type, "si2146", I2C_NAME_SIZE); info.addr = 0x60; diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index 0e86ff423c49..f772e2612608 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c @@ -196,7 +196,6 @@ static void em28xx_wake_i2c(struct em28xx *dev) v4l2_device_call_all(v4l2_dev, 0, core, reset, 0); v4l2_device_call_all(v4l2_dev, 0, video, s_routing, INPUT(dev->ctl_input)->vmux, 0, 0); - v4l2_device_call_all(v4l2_dev, 0, video, s_stream, 0); } static int em28xx_colorlevels_set_default(struct em28xx *dev) @@ -867,6 +866,147 @@ static void res_free(struct em28xx *dev, enum v4l2_buf_type f_type) em28xx_videodbg("res: put %d\n", res_type); } +static void em28xx_v4l2_media_release(struct em28xx *dev) +{ +#ifdef CONFIG_MEDIA_CONTROLLER + int i; + + for (i = 0; i < MAX_EM28XX_INPUT; i++) { + if (!INPUT(i)->type) + return; + media_device_unregister_entity(&dev->input_ent[i]); + } +#endif +} + +/* + * Media Controller helper functions + */ + +static int em28xx_enable_analog_tuner(struct em28xx *dev) +{ +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_device *mdev = dev->media_dev; + struct em28xx_v4l2 *v4l2 = dev->v4l2; + struct media_entity *source; + struct media_link *link, *found_link = NULL; + int ret, active_links = 0; + + if (!mdev || !v4l2->decoder) + return 0; + + /* + * 
This will find the tuner that is connected into the decoder. + * Technically, this is not 100% correct, as the device may be + * using an analog input instead of the tuner. However, as we can't + * do DVB streaming while the DMA engine is being used for V4L2, + * this should be enough for the actual needs. + */ + list_for_each_entry(link, &v4l2->decoder->links, list) { + if (link->sink->entity == v4l2->decoder) { + found_link = link; + if (link->flags & MEDIA_LNK_FL_ENABLED) + active_links++; + break; + } + } + + if (active_links == 1 || !found_link) + return 0; + + source = found_link->source->entity; + list_for_each_entry(link, &source->links, list) { + struct media_entity *sink; + int flags = 0; + + sink = link->sink->entity; + + if (sink == v4l2->decoder) + flags = MEDIA_LNK_FL_ENABLED; + + ret = media_entity_setup_link(link, flags); + if (ret) { + pr_err("Couldn't change link %s->%s to %s. Error %d\n", + source->name, sink->name, + flags ? "enabled" : "disabled", + ret); + return ret; + } else + em28xx_videodbg("link %s->%s was %s\n", + source->name, sink->name, + flags ? "ENABLED" : "disabled"); + } +#endif + return 0; +} + +static const char * const iname[] = { + [EM28XX_VMUX_COMPOSITE] = "Composite", + [EM28XX_VMUX_SVIDEO] = "S-Video", + [EM28XX_VMUX_TELEVISION] = "Television", + [EM28XX_RADIO] = "Radio", +}; + +static void em28xx_v4l2_create_entities(struct em28xx *dev) +{ +#if defined(CONFIG_MEDIA_CONTROLLER) + struct em28xx_v4l2 *v4l2 = dev->v4l2; + int ret, i; + + /* Initialize Video, VBI and Radio pads */ + v4l2->video_pad.flags = MEDIA_PAD_FL_SINK; + ret = media_entity_pads_init(&v4l2->vdev.entity, 1, &v4l2->video_pad); + if (ret < 0) + pr_err("failed to initialize video media entity!\n"); + + if (em28xx_vbi_supported(dev)) { + v4l2->vbi_pad.flags = MEDIA_PAD_FL_SINK; + ret = media_entity_pads_init(&v4l2->vbi_dev.entity, 1, + &v4l2->vbi_pad); + if (ret < 0) + pr_err("failed to initialize vbi media entity!\n"); + } + + /* Webcams don't have input connectors */ + if (dev->board.is_webcam) + return; + + /* Create entities for each input connector */ + for (i = 0; i < MAX_EM28XX_INPUT; i++) { + struct media_entity *ent = &dev->input_ent[i]; + + if (!INPUT(i)->type) + break; + + ent->name = iname[INPUT(i)->type]; + ent->flags = MEDIA_ENT_FL_CONNECTOR; + dev->input_pad[i].flags = MEDIA_PAD_FL_SOURCE; + + switch (INPUT(i)->type) { + case EM28XX_VMUX_COMPOSITE: + ent->function = MEDIA_ENT_F_CONN_COMPOSITE; + break; + case EM28XX_VMUX_SVIDEO: + ent->function = MEDIA_ENT_F_CONN_SVIDEO; + break; + default: /* EM28XX_VMUX_TELEVISION or EM28XX_RADIO */ + if (dev->tuner_type != TUNER_ABSENT) + ent->function = MEDIA_ENT_F_CONN_RF; + break; + } + + ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]); + if (ret < 0) + pr_err("failed to initialize input pad[%d]!\n", i); + + ret = media_device_register_entity(dev->media_dev, ent); + if (ret < 0) + pr_err("failed to register input entity %d!\n", i); + } +#endif +} + + /* ------------------------------------------------------------------ Videobuf2 operations ------------------------------------------------------------------*/ @@ -884,6 +1024,9 @@ static int queue_setup(struct vb2_queue *vq, return sizes[0] < size ? 
-EINVAL : 0; *nplanes = 1; sizes[0] = size; + + em28xx_enable_analog_tuner(dev); + return 0; } @@ -962,6 +1105,9 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count) f.type = V4L2_TUNER_ANALOG_TV; v4l2_device_call_all(&v4l2->v4l2_dev, 0, tuner, s_frequency, &f); + + /* Enable video stream at TV decoder */ + v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_stream, 1); } v4l2->streaming_users++; @@ -981,6 +1127,9 @@ static void em28xx_stop_streaming(struct vb2_queue *vq) res_free(dev, vq->type); if (v4l2->streaming_users-- == 1) { + /* Disable video stream at TV decoder */ + v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_stream, 0); + /* Last active user, so shutdown all the URBS */ em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE); } @@ -1013,6 +1162,9 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq) res_free(dev, vq->type); if (v4l2->streaming_users-- == 1) { + /* Disable video stream at TV decoder */ + v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_stream, 0); + /* Last active user, so shutdown all the URBS */ em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE); } @@ -1224,6 +1376,12 @@ static void scale_to_size(struct em28xx *dev, *width = (((unsigned long)maxw) << 12) / (hscale + 4096L); *height = (((unsigned long)maxh) << 12) / (vscale + 4096L); + + /* Don't let width or height to be zero */ + if (*width < 1) + *width = 1; + if (*height < 1) + *height = 1; } /* ------------------------------------------------------------------ @@ -1299,6 +1457,11 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0); } + /* Avoid division by zero at size_to_scale */ + if (width < 1) + width = 1; + if (height < 1) + height = 1; size_to_scale(dev, width, height, &hscale, &vscale); scale_to_size(dev, hscale, vscale, &width, &height); @@ -1434,18 +1597,6 @@ static int vidioc_s_parm(struct file *file, void *priv, 0, video, s_parm, p); } -static const char *iname[] = { - [EM28XX_VMUX_COMPOSITE1] = "Composite1", - [EM28XX_VMUX_COMPOSITE2] = "Composite2", - [EM28XX_VMUX_COMPOSITE3] = "Composite3", - [EM28XX_VMUX_COMPOSITE4] = "Composite4", - [EM28XX_VMUX_SVIDEO] = "S-Video", - [EM28XX_VMUX_TELEVISION] = "Television", - [EM28XX_VMUX_CABLE] = "Cable TV", - [EM28XX_VMUX_DVB] = "DVB", - [EM28XX_VMUX_DEBUG] = "for debug only", -}; - static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i) { @@ -1463,8 +1614,7 @@ static int vidioc_enum_input(struct file *file, void *priv, strcpy(i->name, iname[INPUT(n)->type]); - if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type) || - (EM28XX_VMUX_CABLE == INPUT(n)->type)) + if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type)) i->type = V4L2_INPUT_TYPE_TUNER; i->std = dev->v4l2->vdev.tvnorms; @@ -1961,6 +2111,8 @@ static int em28xx_v4l2_fini(struct em28xx *dev) em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE); + em28xx_v4l2_media_release(dev); + if (video_is_registered(&v4l2->radio_dev)) { em28xx_info("V4L2 device %s deregistered\n", video_device_node_name(&v4l2->radio_dev)); @@ -2284,6 +2436,9 @@ static int em28xx_v4l2_init(struct em28xx *dev) v4l2->dev = dev; dev->v4l2 = v4l2; +#ifdef CONFIG_MEDIA_CONTROLLER + v4l2->v4l2_dev.mdev = dev->media_dev; +#endif ret = v4l2_device_register(&dev->udev->dev, &v4l2->v4l2_dev); if (ret < 0) { em28xx_errdev("Call to v4l2_device_register() failed!\n"); @@ -2556,6 +2711,16 @@ static int em28xx_v4l2_init(struct em28xx *dev) video_device_node_name(&v4l2->radio_dev)); } + /* Init entities at the Media Controller */ + 
em28xx_v4l2_create_entities(dev); + + ret = v4l2_mc_create_media_graph(dev->media_dev); + if (ret) { + em28xx_errdev("failed to create media graph\n"); + em28xx_v4l2_media_release(dev); + goto unregister_dev; + } + em28xx_info("V4L2 video device registered as %s\n", video_device_node_name(&v4l2->vdev)); @@ -2577,6 +2742,22 @@ static int em28xx_v4l2_init(struct em28xx *dev) return 0; unregister_dev: + if (video_is_registered(&v4l2->radio_dev)) { + em28xx_info("V4L2 device %s deregistered\n", + video_device_node_name(&v4l2->radio_dev)); + video_unregister_device(&v4l2->radio_dev); + } + if (video_is_registered(&v4l2->vbi_dev)) { + em28xx_info("V4L2 device %s deregistered\n", + video_device_node_name(&v4l2->vbi_dev)); + video_unregister_device(&v4l2->vbi_dev); + } + if (video_is_registered(&v4l2->vdev)) { + em28xx_info("V4L2 device %s deregistered\n", + video_device_node_name(&v4l2->vdev)); + video_unregister_device(&v4l2->vdev); + } + v4l2_ctrl_handler_free(&v4l2->ctrl_handler); v4l2_device_unregister(&v4l2->v4l2_dev); err: diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 8ff066c977d9..267444961775 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h @@ -26,7 +26,7 @@ #ifndef _EM28XX_H #define _EM28XX_H -#define EM28XX_VERSION "0.2.1" +#define EM28XX_VERSION "0.2.2" #define DRIVER_DESC "Empia em28xx device driver" #include <linux/workqueue.h> @@ -291,15 +291,9 @@ struct em28xx_dmaqueue { #define MAX_EM28XX_INPUT 4 enum enum28xx_itype { - EM28XX_VMUX_COMPOSITE1 = 1, - EM28XX_VMUX_COMPOSITE2, - EM28XX_VMUX_COMPOSITE3, - EM28XX_VMUX_COMPOSITE4, + EM28XX_VMUX_COMPOSITE = 1, EM28XX_VMUX_SVIDEO, EM28XX_VMUX_TELEVISION, - EM28XX_VMUX_CABLE, - EM28XX_VMUX_DVB, - EM28XX_VMUX_DEBUG, EM28XX_RADIO, }; @@ -558,6 +552,11 @@ struct em28xx_v4l2 { bool top_field; int vbi_read; unsigned int field_count; + +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_pad video_pad, vbi_pad; + struct media_entity *decoder; +#endif }; struct em28xx_audio { @@ -718,6 +717,12 @@ struct em28xx { /* Snapshot button input device */ char snapshot_button_path[30]; /* path of the input dev */ struct input_dev *sbutton_input_dev; + +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_device *media_dev; + struct media_entity input_ent[MAX_EM28XX_INPUT]; + struct media_pad input_pad[MAX_EM28XX_INPUT]; +#endif }; #define kref_to_dev(d) container_of(d, struct em28xx, ref) diff --git a/drivers/media/usb/go7007/go7007-priv.h b/drivers/media/usb/go7007/go7007-priv.h index 745185eb060b..bebee8ca9981 100644 --- a/drivers/media/usb/go7007/go7007-priv.h +++ b/drivers/media/usb/go7007/go7007-priv.h @@ -250,7 +250,7 @@ struct go7007 { struct i2c_adapter i2c_adapter; /* HPI driver */ - struct go7007_hpi_ops *hpi_ops; + const struct go7007_hpi_ops *hpi_ops; void *hpi_context; int interrupt_available; wait_queue_head_t interrupt_waitq; diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c index 3dbf14c85c5c..14d3f8c1ce4a 100644 --- a/drivers/media/usb/go7007/go7007-usb.c +++ b/drivers/media/usb/go7007/go7007-usb.c @@ -932,7 +932,7 @@ static void go7007_usb_release(struct go7007 *go) kfree(go->hpi_context); } -static struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = { +static const struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = { .interface_reset = go7007_usb_interface_reset, .write_interrupt = go7007_usb_ezusb_write_interrupt, .read_interrupt = go7007_usb_read_interrupt, @@ -942,7 +942,7 @@ static struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = { 
.release = go7007_usb_release, }; -static struct go7007_hpi_ops go7007_usb_onboard_hpi_ops = { +static const struct go7007_hpi_ops go7007_usb_onboard_hpi_ops = { .interface_reset = go7007_usb_interface_reset, .write_interrupt = go7007_usb_onboard_write_interrupt, .read_interrupt = go7007_usb_read_interrupt, diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index 3fc64197b4e6..08f0ca7aa012 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -273,7 +273,9 @@ static int hdpvr_probe(struct usb_interface *interface, struct hdpvr_device *dev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; +#if IS_ENABLED(CONFIG_I2C) struct i2c_client *client; +#endif size_t buffer_size; int i; int retval = -ENOMEM; diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c index 7dee22deebf3..ba7f02270c83 100644 --- a/drivers/media/usb/hdpvr/hdpvr-video.c +++ b/drivers/media/usb/hdpvr/hdpvr-video.c @@ -462,10 +462,8 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count, } if (wait_event_interruptible(dev->wait_data, - buf->status == BUFSTAT_READY)) { - ret = -ERESTARTSYS; - goto err; - } + buf->status == BUFSTAT_READY)) + return -ERESTARTSYS; } if (buf->status != BUFSTAT_READY) diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c index c104315fdc17..2d33033682af 100644 --- a/drivers/media/usb/msi2500/msi2500.c +++ b/drivers/media/usb/msi2500/msi2500.c @@ -839,8 +839,6 @@ static int msi2500_set_usb_adc(struct msi2500_dev *dev) goto err; ret = msi2500_ctrl_msg(dev, CMD_WREG, reg3); - if (ret) - goto err; err: return ret; } diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c index fd888a604462..c45f30715dcd 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-context.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c @@ -196,7 +196,7 @@ int pvr2_context_global_init(void) pvr2_context_thread_ptr = kthread_run(pvr2_context_thread_func, NULL, "pvrusb2-context"); - return (pvr2_context_thread_ptr ? 0 : -ENOMEM); + return IS_ERR(pvr2_context_thread_ptr) ? -ENOMEM : 0; } diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c index 086cf1c7bd7d..18aed5dd325e 100644 --- a/drivers/media/usb/pwc/pwc-if.c +++ b/drivers/media/usb/pwc/pwc-if.c @@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = { { USB_DEVICE(0x0471, 0x0312) }, { USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */ { USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */ + { USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */ { USB_DEVICE(0x069A, 0x0001) }, /* Askey */ { USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */ { USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */ @@ -810,6 +811,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id name = "Philips SPC 900NC webcam"; type_id = 740; break; + case 0x032C: + PWC_INFO("Philips SPC 880NC USB webcam detected.\n"); + name = "Philips SPC 880NC webcam"; + type_id = 740; + break; default: return -ENODEV; break; diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c index 8abbd3cc8eba..4dac499ed28e 100644 --- a/drivers/media/usb/siano/smsusb.c +++ b/drivers/media/usb/siano/smsusb.c @@ -27,6 +27,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>. 
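(The pvrusb2 hunk above replaces a NULL check on kthread_run() with IS_ERR(): kthread_run() reports failure through an ERR_PTR-encoded pointer and never returns NULL, so the old test could not trigger. A minimal sketch of the corrected pattern — the example_* names are hypothetical, and a driver may also propagate PTR_ERR() instead of mapping the failure to -ENOMEM as pvrusb2 does:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *example_task;

static int example_thread_fn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		/* ... do the periodic work here, then sleep again ... */
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int example_start_thread(void)
{
	example_task = kthread_run(example_thread_fn, NULL, "example-worker");

	/* kthread_run() returns an ERR_PTR() on failure, never NULL */
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);

	return 0;
})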
#include <linux/firmware.h> #include <linux/slab.h> #include <linux/module.h> +#include <media/v4l2-mc.h> #include "sms-cards.h" #include "smsendian.h" @@ -51,6 +52,9 @@ struct smsusb_urb_t { struct smsusb_device_t *dev; struct urb urb; + + /* For the bottom half */ + struct work_struct wq; }; struct smsusb_device_t { @@ -71,6 +75,18 @@ static int smsusb_submit_urb(struct smsusb_device_t *dev, struct smsusb_urb_t *surb); /** + * Completing URB's callback handler - bottom half (proccess context) + * submits the URB prepared on smsusb_onresponse() + */ +static void do_submit_urb(struct work_struct *work) +{ + struct smsusb_urb_t *surb = container_of(work, struct smsusb_urb_t, wq); + struct smsusb_device_t *dev = surb->dev; + + smsusb_submit_urb(dev, surb); +} + +/** * Completing URB's callback handler - top half (interrupt context) * adds completing sms urb to the global surbs list and activtes the worker * thread the surb @@ -138,13 +154,15 @@ static void smsusb_onresponse(struct urb *urb) exit_and_resubmit: - smsusb_submit_urb(dev, surb); + INIT_WORK(&surb->wq, do_submit_urb); + schedule_work(&surb->wq); } static int smsusb_submit_urb(struct smsusb_device_t *dev, struct smsusb_urb_t *surb) { if (!surb->cb) { + /* This function can sleep */ surb->cb = smscore_getbuffer(dev->coredev); if (!surb->cb) { pr_err("smscore_getbuffer(...) returned NULL\n"); @@ -349,20 +367,10 @@ static void *siano_media_device_register(struct smsusb_device_t *dev, struct sms_board *board = sms_get_board(board_id); int ret; - mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + mdev = v4l2_mc_usb_media_device_init(udev, board->name); if (!mdev) return NULL; - mdev->dev = &udev->dev; - strlcpy(mdev->model, board->name, sizeof(mdev->model)); - if (udev->serial) - strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial)); - strcpy(mdev->bus_info, udev->devpath); - mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); - mdev->driver_version = LINUX_VERSION_CODE; - - media_device_init(mdev); - ret = media_device_register(mdev); if (ret) { media_device_cleanup(mdev); diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c index 46191d5262eb..6ecb0b48423f 100644 --- a/drivers/media/usb/stk1160/stk1160-video.c +++ b/drivers/media/usb/stk1160/stk1160-video.c @@ -98,7 +98,6 @@ void stk1160_buffer_done(struct stk1160 *dev) buf->vb.sequence = dev->sequence++; buf->vb.field = V4L2_FIELD_INTERLACED; - buf->vb.vb2_buf.planes[0].bytesused = buf->bytesused; buf->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->bytesused); diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c index 4ebb33943f9a..f6cfad46547e 100644 --- a/drivers/media/usb/usbtv/usbtv-video.c +++ b/drivers/media/usb/usbtv/usbtv-video.c @@ -312,20 +312,24 @@ static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk) usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd); usbtv->chunks_done++; - /* Last chunk in a frame, signalling an end */ - if (odd && chunk_no == usbtv->n_chunks-1) { - int size = vb2_plane_size(&buf->vb.vb2_buf, 0); - enum vb2_buffer_state state = usbtv->chunks_done == - usbtv->n_chunks ? 
- VB2_BUF_STATE_DONE : - VB2_BUF_STATE_ERROR; - - buf->vb.field = V4L2_FIELD_INTERLACED; - buf->vb.sequence = usbtv->sequence++; - buf->vb.vb2_buf.timestamp = ktime_get_ns(); - vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size); - vb2_buffer_done(&buf->vb.vb2_buf, state); - list_del(&buf->list); + /* Last chunk in a field */ + if (chunk_no == usbtv->n_chunks-1) { + /* Last chunk in a frame, signalling an end */ + if (odd && !usbtv->last_odd) { + int size = vb2_plane_size(&buf->vb.vb2_buf, 0); + enum vb2_buffer_state state = usbtv->chunks_done == + usbtv->n_chunks ? + VB2_BUF_STATE_DONE : + VB2_BUF_STATE_ERROR; + + buf->vb.field = V4L2_FIELD_INTERLACED; + buf->vb.sequence = usbtv->sequence++; + buf->vb.vb2_buf.timestamp = ktime_get_ns(); + vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size); + vb2_buffer_done(&buf->vb.vb2_buf, state); + list_del(&buf->list); + } + usbtv->last_odd = odd; } spin_unlock_irqrestore(&usbtv->buflock, flags); @@ -389,6 +393,10 @@ static struct urb *usbtv_setup_iso_transfer(struct usbtv *usbtv) ip->transfer_flags = URB_ISO_ASAP; ip->transfer_buffer = kzalloc(size * USBTV_ISOC_PACKETS, GFP_KERNEL); + if (!ip->transfer_buffer) { + usb_free_urb(ip); + return NULL; + } ip->complete = usbtv_iso_cb; ip->number_of_packets = USBTV_ISOC_PACKETS; ip->transfer_buffer_length = size * USBTV_ISOC_PACKETS; @@ -639,6 +647,7 @@ static int usbtv_start_streaming(struct vb2_queue *vq, unsigned int count) if (usbtv->udev == NULL) return -ENODEV; + usbtv->last_odd = 1; usbtv->sequence = 0; return usbtv_start(usbtv); } diff --git a/drivers/media/usb/usbtv/usbtv.h b/drivers/media/usb/usbtv/usbtv.h index 19cb8bf7c4e9..161b38d5cfa0 100644 --- a/drivers/media/usb/usbtv/usbtv.h +++ b/drivers/media/usb/usbtv/usbtv.h @@ -95,6 +95,7 @@ struct usbtv { int width, height; int n_chunks; int iso_size; + int last_odd; unsigned int sequence; struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS]; diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c index de9ff3bb8edd..12f5ebbd0436 100644 --- a/drivers/media/usb/usbvision/usbvision-video.c +++ b/drivers/media/usb/usbvision/usbvision-video.c @@ -162,8 +162,7 @@ MODULE_ALIAS(DRIVER_ALIAS); static inline struct usb_usbvision *cd_to_usbvision(struct device *cd) { - struct video_device *vdev = - container_of(cd, struct video_device, dev); + struct video_device *vdev = to_video_device(cd); return video_get_drvdata(vdev); } @@ -177,8 +176,7 @@ static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); static ssize_t show_model(struct device *cd, struct device_attribute *attr, char *buf) { - struct video_device *vdev = - container_of(cd, struct video_device, dev); + struct video_device *vdev = to_video_device(cd); struct usb_usbvision *usbvision = video_get_drvdata(vdev); return sprintf(buf, "%s\n", usbvision_device_data[usbvision->dev_model].model_string); @@ -188,8 +186,7 @@ static DEVICE_ATTR(model, S_IRUGO, show_model, NULL); static ssize_t show_hue(struct device *cd, struct device_attribute *attr, char *buf) { - struct video_device *vdev = - container_of(cd, struct video_device, dev); + struct video_device *vdev = to_video_device(cd); struct usb_usbvision *usbvision = video_get_drvdata(vdev); struct v4l2_control ctrl; ctrl.id = V4L2_CID_HUE; @@ -203,8 +200,7 @@ static DEVICE_ATTR(hue, S_IRUGO, show_hue, NULL); static ssize_t show_contrast(struct device *cd, struct device_attribute *attr, char *buf) { - struct video_device *vdev = - container_of(cd, struct video_device, dev); + struct video_device *vdev = 
to_video_device(cd); struct usb_usbvision *usbvision = video_get_drvdata(vdev); struct v4l2_control ctrl; ctrl.id = V4L2_CID_CONTRAST; @@ -218,8 +214,7 @@ static DEVICE_ATTR(contrast, S_IRUGO, show_contrast, NULL); static ssize_t show_brightness(struct device *cd, struct device_attribute *attr, char *buf) { - struct video_device *vdev = - container_of(cd, struct video_device, dev); + struct video_device *vdev = to_video_device(cd); struct usb_usbvision *usbvision = video_get_drvdata(vdev); struct v4l2_control ctrl; ctrl.id = V4L2_CID_BRIGHTNESS; @@ -233,8 +228,7 @@ static DEVICE_ATTR(brightness, S_IRUGO, show_brightness, NULL); static ssize_t show_saturation(struct device *cd, struct device_attribute *attr, char *buf) { - struct video_device *vdev = - container_of(cd, struct video_device, dev); + struct video_device *vdev = to_video_device(cd); struct usb_usbvision *usbvision = video_get_drvdata(vdev); struct v4l2_control ctrl; ctrl.id = V4L2_CID_SATURATION; @@ -248,8 +242,7 @@ static DEVICE_ATTR(saturation, S_IRUGO, show_saturation, NULL); static ssize_t show_streaming(struct device *cd, struct device_attribute *attr, char *buf) { - struct video_device *vdev = - container_of(cd, struct video_device, dev); + struct video_device *vdev = to_video_device(cd); struct usb_usbvision *usbvision = video_get_drvdata(vdev); return sprintf(buf, "%s\n", YES_NO(usbvision->streaming == stream_on ? 1 : 0)); @@ -259,8 +252,7 @@ static DEVICE_ATTR(streaming, S_IRUGO, show_streaming, NULL); static ssize_t show_compression(struct device *cd, struct device_attribute *attr, char *buf) { - struct video_device *vdev = - container_of(cd, struct video_device, dev); + struct video_device *vdev = to_video_device(cd); struct usb_usbvision *usbvision = video_get_drvdata(vdev); return sprintf(buf, "%s\n", YES_NO(usbvision->isoc_mode == ISOC_MODE_COMPRESS)); @@ -270,8 +262,7 @@ static DEVICE_ATTR(compression, S_IRUGO, show_compression, NULL); static ssize_t show_device_bridge(struct device *cd, struct device_attribute *attr, char *buf) { - struct video_device *vdev = - container_of(cd, struct video_device, dev); + struct video_device *vdev = to_video_device(cd); struct usb_usbvision *usbvision = video_get_drvdata(vdev); return sprintf(buf, "%d\n", usbvision->bridge_type); } @@ -1156,6 +1147,7 @@ static int usbvision_radio_close(struct file *file) usbvision_audio_off(usbvision); usbvision->radio = 0; usbvision->user--; + mutex_unlock(&usbvision->v4l2_lock); if (usbvision->remove_pending) { printk(KERN_INFO "%s: Final disconnect\n", __func__); @@ -1164,7 +1156,6 @@ static int usbvision_radio_close(struct file *file) return 0; } - mutex_unlock(&usbvision->v4l2_lock); PDEBUG(DBG_IO, "success"); return v4l2_fh_release(file); } diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig index 9beece00869b..29b3436d0910 100644 --- a/drivers/media/v4l2-core/Kconfig +++ b/drivers/media/v4l2-core/Kconfig @@ -37,7 +37,6 @@ config VIDEO_PCI_SKELETON # Used by drivers that need tuner.ko config VIDEO_TUNER tristate - depends on MEDIA_TUNER # Used by drivers that need v4l2-mem2mem.ko config V4L2_MEM2MEM_DEV diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile index 1dc8bba2b198..795a5352761d 100644 --- a/drivers/media/v4l2-core/Makefile +++ b/drivers/media/v4l2-core/Makefile @@ -16,6 +16,7 @@ endif ifeq ($(CONFIG_TRACEPOINTS),y) videodev-objs += vb2-trace.o v4l2-trace.o endif +videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o obj-$(CONFIG_VIDEO_V4L2) += videodev.o obj-$(CONFIG_VIDEO_V4L2) 
+= v4l2-common.o diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c index 76496fd282aa..731487be5baa 100644 --- a/drivers/media/v4l2-core/tuner-core.c +++ b/drivers/media/v4l2-core/tuner-core.c @@ -696,16 +696,32 @@ static int tuner_probe(struct i2c_client *client, /* Should be just before return */ register_client: #if defined(CONFIG_MEDIA_CONTROLLER) - t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK; - t->pad[TUNER_PAD_IF_OUTPUT].flags = MEDIA_PAD_FL_SOURCE; - t->sd.entity.function = MEDIA_ENT_F_TUNER; t->sd.entity.name = t->name; + /* + * Handle the special case where the tuner has actually + * two stages: the PLL to tune into a frequency and the + * IF-PLL demodulator (tda988x). + */ + if (t->type == TUNER_TDA9887) { + t->pad[IF_VID_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK; + t->pad[IF_VID_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE; + ret = media_entity_pads_init(&t->sd.entity, + IF_VID_DEC_PAD_NUM_PADS, + &t->pad[0]); + t->sd.entity.function = MEDIA_ENT_F_IF_VID_DECODER; + } else { + t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK; + t->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE; + t->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE; + ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS, + &t->pad[0]); + t->sd.entity.function = MEDIA_ENT_F_TUNER; + } - ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS, &t->pad[0]); if (ret < 0) { tuner_err("failed to initialize media entity!\n"); kfree(t); - return -ENODEV; + return ret; } #endif /* Sets a default mode */ diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c index 5bada202b2d3..a4b224d92572 100644 --- a/drivers/media/v4l2-core/v4l2-async.c +++ b/drivers/media/v4l2-core/v4l2-async.c @@ -119,6 +119,13 @@ static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier, return ret; } + ret = v4l2_subdev_call(sd, core, registered_async); + if (ret < 0 && ret != -ENOIOCTLCMD) { + if (notifier->unbind) + notifier->unbind(notifier, sd, asd); + return ret; + } + if (list_empty(¬ifier->waiting) && notifier->complete) return notifier->complete(notifier); diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 8fd84a67478a..019644ff627d 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -415,7 +415,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user get_user(kp->index, &up->index) || get_user(kp->type, &up->type) || get_user(kp->flags, &up->flags) || - get_user(kp->memory, &up->memory)) + get_user(kp->memory, &up->memory) || + get_user(kp->length, &up->length)) return -EFAULT; if (V4L2_TYPE_IS_OUTPUT(kp->type)) @@ -427,9 +428,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user return -EFAULT; if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { - if (get_user(kp->length, &up->length)) - return -EFAULT; - num_planes = kp->length; if (num_planes == 0) { kp->m.planes = NULL; @@ -462,16 +460,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user } else { switch (kp->memory) { case V4L2_MEMORY_MMAP: - if (get_user(kp->length, &up->length) || - get_user(kp->m.offset, &up->m.offset)) + if (get_user(kp->m.offset, &up->m.offset)) return -EFAULT; break; case V4L2_MEMORY_USERPTR: { compat_long_t tmp; - if (get_user(kp->length, &up->length) || - get_user(tmp, &up->m.userptr)) + if (get_user(tmp, &up->m.userptr)) return -EFAULT; 
kp->m.userptr = (unsigned long)compat_ptr(tmp); @@ -513,7 +509,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) || put_user(kp->sequence, &up->sequence) || put_user(kp->reserved2, &up->reserved2) || - put_user(kp->reserved, &up->reserved)) + put_user(kp->reserved, &up->reserved) || + put_user(kp->length, &up->length)) return -EFAULT; if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { @@ -536,13 +533,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user } else { switch (kp->memory) { case V4L2_MEMORY_MMAP: - if (put_user(kp->length, &up->length) || - put_user(kp->m.offset, &up->m.offset)) + if (put_user(kp->m.offset, &up->m.offset)) return -EFAULT; break; case V4L2_MEMORY_USERPTR: - if (put_user(kp->length, &up->length) || - put_user(kp->m.userptr, &up->m.userptr)) + if (put_user(kp->m.userptr, &up->m.userptr)) return -EFAULT; break; case V4L2_MEMORY_OVERLAY: diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index c9d5537b6af7..8b321e0aae62 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -462,6 +462,14 @@ const char * const *v4l2_ctrl_get_menu(u32 id) "RGB full range (0-255)", NULL, }; + static const char * const dv_it_content_type[] = { + "Graphics", + "Photo", + "Cinema", + "Game", + "No IT Content", + NULL, + }; static const char * const detect_md_mode[] = { "Disabled", "Global", @@ -560,6 +568,9 @@ const char * const *v4l2_ctrl_get_menu(u32 id) case V4L2_CID_DV_TX_RGB_RANGE: case V4L2_CID_DV_RX_RGB_RANGE: return dv_rgb_range; + case V4L2_CID_DV_TX_IT_CONTENT_TYPE: + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: + return dv_it_content_type; case V4L2_CID_DETECT_MD_MODE: return detect_md_mode; @@ -747,6 +758,7 @@ const char *v4l2_ctrl_get_name(u32 id) case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range"; case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range"; case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header"; + case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame"; /* VPX controls */ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions"; @@ -881,8 +893,10 @@ const char *v4l2_ctrl_get_name(u32 id) case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present"; case V4L2_CID_DV_TX_MODE: return "Transmit Mode"; case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range"; + case V4L2_CID_DV_TX_IT_CONTENT_TYPE: return "Tx IT Content Type"; case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present"; case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range"; + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return "Rx IT Content Type"; case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls"; case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis"; @@ -985,6 +999,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: *type = V4L2_CTRL_TYPE_INTEGER; break; + case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: case V4L2_CID_PAN_RESET: case V4L2_CID_TILT_RESET: case V4L2_CID_FLASH_STROBE: @@ -1038,7 +1053,9 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, case V4L2_CID_SCENE_MODE: case V4L2_CID_DV_TX_MODE: case V4L2_CID_DV_TX_RGB_RANGE: + case V4L2_CID_DV_TX_IT_CONTENT_TYPE: case V4L2_CID_DV_RX_RGB_RANGE: + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: case V4L2_CID_TEST_PATTERN: case 
V4L2_CID_TUNE_DEEMPHASIS: case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: @@ -1185,6 +1202,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, case V4L2_CID_DV_TX_RXSENSE: case V4L2_CID_DV_TX_EDID_PRESENT: case V4L2_CID_DV_RX_POWER_PRESENT: + case V4L2_CID_DV_RX_IT_CONTENT_TYPE: case V4L2_CID_RDS_RX_PTY: case V4L2_CID_RDS_RX_PS_NAME: case V4L2_CID_RDS_RX_RADIO_TEXT: @@ -2211,22 +2229,6 @@ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl, } EXPORT_SYMBOL(v4l2_ctrl_new_int_menu); -/* Add a control from another handler to this handler */ -struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl, - struct v4l2_ctrl *ctrl) -{ - if (hdl == NULL || hdl->error) - return NULL; - if (ctrl == NULL) { - handler_set_err(hdl, -EINVAL); - return NULL; - } - if (ctrl->handler == hdl) - return ctrl; - return handler_new_ref(hdl, ctrl) ? NULL : ctrl; -} -EXPORT_SYMBOL(v4l2_ctrl_add_ctrl); - /* Add the controls from another handler to our own. */ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, struct v4l2_ctrl_handler *add, diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index ec258b73001a..889de0a32152 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -165,7 +165,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, bt->width > cap->max_width || bt->pixelclock < cap->min_pixelclock || bt->pixelclock > cap->max_pixelclock || - (cap->standards && bt->standards && + (!(caps & V4L2_DV_BT_CAP_CUSTOM) && + cap->standards && bt->standards && !(bt->standards & cap->standards)) || (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) || (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE))) diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 8a018c6dd16a..14843090fd61 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1191,6 +1191,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/CbCr 4:2:0 (16x16 MB, N-C)"; break; case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break; case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break; + case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_YVU422M: descr = "Planar YVU 4:2:2 (N-C)"; break; + case V4L2_PIX_FMT_YUV444M: descr = "Planar YUV 4:4:4 (N-C)"; break; + case V4L2_PIX_FMT_YVU444M: descr = "Planar YVU 4:4:4 (N-C)"; break; case V4L2_PIX_FMT_SBGGR8: descr = "8-bit Bayer BGBG/GRGR"; break; case V4L2_PIX_FMT_SGBRG8: descr = "8-bit Bayer GBGB/RGRG"; break; case V4L2_PIX_FMT_SGRBG8: descr = "8-bit Bayer GRGR/BGBG"; break; diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c new file mode 100644 index 000000000000..a7f41b323522 --- /dev/null +++ b/drivers/media/v4l2-core/v4l2-mc.c @@ -0,0 +1,258 @@ +/* + * Media Controller ancillary functions + * + * (c) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/usb.h> +#include <media/media-entity.h> +#include <media/v4l2-mc.h> + + +struct media_device *v4l2_mc_pci_media_device_init(struct pci_dev *pci_dev, + const char *name) +{ +#ifdef CONFIG_PCI + struct media_device *mdev; + + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return NULL; + + mdev->dev = &pci_dev->dev; + + if (name) + strlcpy(mdev->model, name, sizeof(mdev->model)); + else + strlcpy(mdev->model, pci_name(pci_dev), sizeof(mdev->model)); + + sprintf(mdev->bus_info, "PCI:%s", pci_name(pci_dev)); + + mdev->hw_revision = pci_dev->subsystem_vendor << 16 + | pci_dev->subsystem_device; + + mdev->driver_version = LINUX_VERSION_CODE; + + media_device_init(mdev); + + return mdev; +#else + return NULL; +#endif +} +EXPORT_SYMBOL_GPL(v4l2_mc_pci_media_device_init); + +struct media_device *__v4l2_mc_usb_media_device_init(struct usb_device *udev, + const char *board_name, + const char *driver_name) +{ +#ifdef CONFIG_USB + struct media_device *mdev; + + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return NULL; + + mdev->dev = &udev->dev; + + if (driver_name) + strlcpy(mdev->driver_name, driver_name, + sizeof(mdev->driver_name)); + + if (board_name) + strlcpy(mdev->model, board_name, sizeof(mdev->model)); + else if (udev->product) + strlcpy(mdev->model, udev->product, sizeof(mdev->model)); + else + strlcpy(mdev->model, "unknown model", sizeof(mdev->model)); + if (udev->serial) + strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial)); + usb_make_path(udev, mdev->bus_info, sizeof(mdev->bus_info)); + mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); + mdev->driver_version = LINUX_VERSION_CODE; + + media_device_init(mdev); + + return mdev; +#else + return NULL; +#endif +} +EXPORT_SYMBOL_GPL(__v4l2_mc_usb_media_device_init); + +int v4l2_mc_create_media_graph(struct media_device *mdev) + +{ + struct media_entity *entity; + struct media_entity *if_vid = NULL, *if_aud = NULL; + struct media_entity *tuner = NULL, *decoder = NULL; + struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL; + bool is_webcam = false; + u32 flags; + int ret; + + if (!mdev) + return 0; + + media_device_for_each_entity(entity, mdev) { + switch (entity->function) { + case MEDIA_ENT_F_IF_VID_DECODER: + if_vid = entity; + break; + case MEDIA_ENT_F_IF_AUD_DECODER: + if_aud = entity; + break; + case MEDIA_ENT_F_TUNER: + tuner = entity; + break; + case MEDIA_ENT_F_ATV_DECODER: + decoder = entity; + break; + case MEDIA_ENT_F_IO_V4L: + io_v4l = entity; + break; + case MEDIA_ENT_F_IO_VBI: + io_vbi = entity; + break; + case MEDIA_ENT_F_IO_SWRADIO: + io_swradio = entity; + break; + case MEDIA_ENT_F_CAM_SENSOR: + is_webcam = true; + break; + } + } + + /* It should have at least one I/O entity */ + if (!io_v4l && !io_vbi && !io_swradio) + return -EINVAL; + + /* + * Here, webcams are modelled in a very simple way: the sensor is + * connected directly to the I/O entity. All dirty details, like + * scaler and crop HW, are hidden. While such a mapping is not enough + * for mc-centric hardware, it is enough for v4l2-interface-centric + * PC consumer hardware.
+ */ + if (is_webcam) { + if (!io_v4l) + return -EINVAL; + + media_device_for_each_entity(entity, mdev) { + if (entity->function != MEDIA_ENT_F_CAM_SENSOR) + continue; + ret = media_create_pad_link(entity, 0, + io_v4l, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + } + if (!decoder) + return 0; + } + + /* The device isn't a webcam. So, it should have a decoder */ + if (!decoder) + return -EINVAL; + + /* Link the tuner and IF video output pads */ + if (tuner) { + if (if_vid) { + ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT, + if_vid, + IF_VID_DEC_PAD_IF_INPUT, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + ret = media_create_pad_link(if_vid, IF_VID_DEC_PAD_OUT, + decoder, DEMOD_PAD_IF_INPUT, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + } else { + ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT, + decoder, DEMOD_PAD_IF_INPUT, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + } + + if (if_aud) { + ret = media_create_pad_link(tuner, TUNER_PAD_AUD_OUT, + if_aud, + IF_AUD_DEC_PAD_IF_INPUT, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + } else { + if_aud = tuner; + } + + } + + /* Create demod to V4L, VBI and SDR radio links */ + if (io_v4l) { + ret = media_create_pad_link(decoder, DEMOD_PAD_VID_OUT, + io_v4l, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + } + + if (io_swradio) { + ret = media_create_pad_link(decoder, DEMOD_PAD_VID_OUT, + io_swradio, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + } + + if (io_vbi) { + ret = media_create_pad_link(decoder, DEMOD_PAD_VBI_OUT, + io_vbi, 0, + MEDIA_LNK_FL_ENABLED); + if (ret) + return ret; + } + + /* Create links for the media connectors */ + flags = MEDIA_LNK_FL_ENABLED; + media_device_for_each_entity(entity, mdev) { + switch (entity->function) { + case MEDIA_ENT_F_CONN_RF: + if (!tuner) + continue; + + ret = media_create_pad_link(entity, 0, tuner, + TUNER_PAD_RF_INPUT, + flags); + break; + case MEDIA_ENT_F_CONN_SVIDEO: + case MEDIA_ENT_F_CONN_COMPOSITE: + ret = media_create_pad_link(entity, 0, decoder, + DEMOD_PAD_IF_INPUT, + flags); + break; + default: + continue; + } + if (ret) + return ret; + + flags = 0; + } + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_mc_create_media_graph); diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c index b27cbb1f5afe..93b33681776c 100644 --- a/drivers/media/v4l2-core/v4l2-of.c +++ b/drivers/media/v4l2-core/v4l2-of.c @@ -146,7 +146,7 @@ static void v4l2_of_parse_parallel_bus(const struct device_node *node, * variable without a low fixed limit. Please use * v4l2_of_alloc_parse_endpoint() in new drivers instead. * - * Return: 0. + * Return: 0 on success or a negative error code on failure. 
*/ int v4l2_of_parse_endpoint(const struct device_node *node, struct v4l2_of_endpoint *endpoint) diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index ff8953ae52d1..dab94080ec3a 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -1227,6 +1227,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb) if (planes[plane].length < vb->planes[plane].min_length) { dprintk(1, "invalid dmabuf length for plane %d\n", plane); + dma_buf_put(dbuf); ret = -EINVAL; goto err; } diff --git a/drivers/media/v4l2-core/videobuf2-dvb.c b/drivers/media/v4l2-core/videobuf2-dvb.c index d09269846b7e..9f38b4218c0d 100644 --- a/drivers/media/v4l2-core/videobuf2-dvb.c +++ b/drivers/media/v4l2-core/videobuf2-dvb.c @@ -77,6 +77,7 @@ static int vb2_dvb_register_adapter(struct vb2_dvb_frontends *fe, struct module *module, void *adapter_priv, struct device *device, + struct media_device *mdev, char *adapter_name, short *adapter_nr, int mfe_shared) @@ -94,7 +95,10 @@ static int vb2_dvb_register_adapter(struct vb2_dvb_frontends *fe, } fe->adapter.priv = adapter_priv; fe->adapter.mfe_shared = mfe_shared; - +#ifdef CONFIG_MEDIA_CONTROLLER_DVB + if (mdev) + fe->adapter.mdev = mdev; +#endif return result; } @@ -193,6 +197,7 @@ int vb2_dvb_register_bus(struct vb2_dvb_frontends *f, struct module *module, void *adapter_priv, struct device *device, + struct media_device *mdev, short *adapter_nr, int mfe_shared) { @@ -207,7 +212,7 @@ int vb2_dvb_register_bus(struct vb2_dvb_frontends *f, } /* Bring up the adapter */ - res = vb2_dvb_register_adapter(f, module, adapter_priv, device, + res = vb2_dvb_register_adapter(f, module, adapter_priv, device, mdev, fe->dvb.name, adapter_nr, mfe_shared); if (res < 0) { pr_warn("vb2_dvb_register_adapter failed (errno = %d)\n", res); @@ -224,7 +229,11 @@ int vb2_dvb_register_bus(struct vb2_dvb_frontends *f, fe->dvb.name, res); goto err; } + res = dvb_create_media_graph(&f->adapter, false); + if (res < 0) + goto err; } + mutex_unlock(&f->lock); return 0; diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index f236250ac106..4034d2d4c507 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -19,6 +19,7 @@ #include <linux/acpi.h> #include <linux/pnp.h> #include <linux/apple_bl.h> +#include <linux/apple-gmux.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/pci.h> @@ -57,7 +58,9 @@ struct apple_gmux_data { /* switcheroo data */ acpi_handle dhandle; int gpe; - enum vga_switcheroo_client_id resume_client_id; + enum vga_switcheroo_client_id switch_state_display; + enum vga_switcheroo_client_id switch_state_ddc; + enum vga_switcheroo_client_id switch_state_external; enum vga_switcheroo_state power_state; struct completion powerchange_done; }; @@ -368,19 +371,70 @@ static const struct backlight_ops gmux_bl_ops = { * for the selected GPU. 
*/ +static void gmux_read_switch_state(struct apple_gmux_data *gmux_data) +{ + if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DDC) == 1) + gmux_data->switch_state_ddc = VGA_SWITCHEROO_IGD; + else + gmux_data->switch_state_ddc = VGA_SWITCHEROO_DIS; + + if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2) + gmux_data->switch_state_display = VGA_SWITCHEROO_IGD; + else + gmux_data->switch_state_display = VGA_SWITCHEROO_DIS; + + if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_EXTERNAL) == 2) + gmux_data->switch_state_external = VGA_SWITCHEROO_IGD; + else + gmux_data->switch_state_external = VGA_SWITCHEROO_DIS; +} + +static void gmux_write_switch_state(struct apple_gmux_data *gmux_data) +{ + if (gmux_data->switch_state_ddc == VGA_SWITCHEROO_IGD) + gmux_write8(gmux_data, GMUX_PORT_SWITCH_DDC, 1); + else + gmux_write8(gmux_data, GMUX_PORT_SWITCH_DDC, 2); + + if (gmux_data->switch_state_display == VGA_SWITCHEROO_IGD) + gmux_write8(gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2); + else + gmux_write8(gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3); + + if (gmux_data->switch_state_external == VGA_SWITCHEROO_IGD) + gmux_write8(gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2); + else + gmux_write8(gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3); +} + static int gmux_switchto(enum vga_switcheroo_client_id id) { - if (id == VGA_SWITCHEROO_IGD) { + apple_gmux_data->switch_state_ddc = id; + apple_gmux_data->switch_state_display = id; + apple_gmux_data->switch_state_external = id; + + gmux_write_switch_state(apple_gmux_data); + + return 0; +} + +static int gmux_switch_ddc(enum vga_switcheroo_client_id id) +{ + enum vga_switcheroo_client_id old_ddc_owner = + apple_gmux_data->switch_state_ddc; + + if (id == old_ddc_owner) + return id; + + pr_debug("Switching DDC from %d to %d\n", old_ddc_owner, id); + apple_gmux_data->switch_state_ddc = id; + + if (id == VGA_SWITCHEROO_IGD) gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1); - gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2); - gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2); - } else { + else gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2); - gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3); - gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3); - } - return 0; + return old_ddc_owner; } /** @@ -440,17 +494,15 @@ static int gmux_get_client_id(struct pci_dev *pdev) return VGA_SWITCHEROO_DIS; } -static enum vga_switcheroo_client_id -gmux_active_client(struct apple_gmux_data *gmux_data) -{ - if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2) - return VGA_SWITCHEROO_IGD; - - return VGA_SWITCHEROO_DIS; -} +static const struct vga_switcheroo_handler gmux_handler_indexed = { + .switchto = gmux_switchto, + .power_state = gmux_set_power_state, + .get_client_id = gmux_get_client_id, +}; -static const struct vga_switcheroo_handler gmux_handler = { +static const struct vga_switcheroo_handler gmux_handler_classic = { .switchto = gmux_switchto, + .switch_ddc = gmux_switch_ddc, .power_state = gmux_set_power_state, .get_client_id = gmux_get_client_id, }; @@ -513,7 +565,6 @@ static int gmux_suspend(struct device *dev) struct pnp_dev *pnp = to_pnp_dev(dev); struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); - gmux_data->resume_client_id = gmux_active_client(gmux_data); gmux_disable_interrupts(gmux_data); return 0; } @@ -524,7 +575,7 @@ static int gmux_resume(struct device *dev) struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); gmux_enable_interrupts(gmux_data); - gmux_switchto(gmux_data->resume_client_id); + gmux_write_switch_state(gmux_data); 
if (gmux_data->power_state == VGA_SWITCHEROO_OFF) gmux_set_discrete_state(gmux_data, gmux_data->power_state); return 0; @@ -704,9 +755,23 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) apple_gmux_data = gmux_data; init_completion(&gmux_data->powerchange_done); gmux_enable_interrupts(gmux_data); + gmux_read_switch_state(gmux_data); - if (vga_switcheroo_register_handler(&gmux_handler)) { - ret = -ENODEV; + /* + * Retina MacBook Pros cannot switch the panel's AUX separately + * and need eDP pre-calibration. They are distinguishable from + * pre-retinas by having an "indexed" gmux. + * + * Pre-retina MacBook Pros can switch the panel's DDC separately. + */ + if (gmux_data->indexed) + ret = vga_switcheroo_register_handler(&gmux_handler_indexed, + VGA_SWITCHEROO_NEEDS_EDP_CONFIG); + else + ret = vga_switcheroo_register_handler(&gmux_handler_classic, + VGA_SWITCHEROO_CAN_SWITCH_DDC); + if (ret) { + pr_err("Failed to register vga_switcheroo handler\n"); goto err_register_handler; } @@ -764,7 +829,7 @@ static void gmux_remove(struct pnp_dev *pnp) } static const struct pnp_device_id gmux_device_ids[] = { - {"APP000B", 0}, + {GMUX_ACPI_HID, 0}, {"", 0} }; diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index e237e9f3312d..0754a37c9674 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -1057,8 +1057,7 @@ static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, { } -static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, - size_t len, +static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { struct ion_buffer *buffer = dmabuf->priv; @@ -1076,8 +1075,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, return PTR_ERR_OR_ZERO(vaddr); } -static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, - size_t len, +static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { struct ion_buffer *buffer = dmabuf->priv; diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c index b8dcf5a26cc4..da34bc12cd7c 100644 --- a/drivers/staging/android/ion/ion_test.c +++ b/drivers/staging/android/ion/ion_test.c @@ -109,7 +109,7 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr, if (offset > dma_buf->size || size > dma_buf->size - offset) return -EINVAL; - ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir); + ret = dma_buf_begin_cpu_access(dma_buf, dir); if (ret) return ret; @@ -139,7 +139,7 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr, copy_offset = 0; } err: - dma_buf_end_cpu_access(dma_buf, offset, size, dir); + dma_buf_end_cpu_access(dma_buf, dir); return ret; } diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig index 14697686eea5..d48a5c29c417 100644 --- a/drivers/staging/media/Kconfig +++ b/drivers/staging/media/Kconfig @@ -31,6 +31,8 @@ source "drivers/staging/media/mn88473/Kconfig" source "drivers/staging/media/omap4iss/Kconfig" +source "drivers/staging/media/timb/Kconfig" + # Keep LIRC at the end, as it has sub-menus source "drivers/staging/media/lirc/Kconfig" diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile index 34c557b4c6d6..fb94f045c40f 100644 --- a/drivers/staging/media/Makefile +++ b/drivers/staging/media/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/ 
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/ obj-$(CONFIG_DVB_MN88472) += mn88472/ obj-$(CONFIG_DVB_MN88473) += mn88473/ +obj-$(CONFIG_VIDEO_TIMBERDALE) += timb/ diff --git a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h index 7b7e7b26c1e8..3cc9be776f8b 100644 --- a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h +++ b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h @@ -538,7 +538,7 @@ struct vpfe_isif_raw_config { }; /********************************************************************** - IPIPE API Structures +* IPIPE API Structures **********************************************************************/ /* IPIPE module configurations */ diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c index 3ec7e65a3ffa..db49af90217e 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_video.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c @@ -147,7 +147,7 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video) mutex_lock(&mdev->graph_mutex); ret = media_entity_graph_walk_init(&graph, entity->graph_obj.mdev); if (ret) { - mutex_unlock(&video->lock); + mutex_unlock(&mdev->graph_mutex); return -ENOMEM; } media_entity_graph_walk_start(&graph, entity); diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c index d009bcb439f0..68ede6c56e6d 100644 --- a/drivers/staging/media/lirc/lirc_parallel.c +++ b/drivers/staging/media/lirc/lirc_parallel.c @@ -193,7 +193,7 @@ static int lirc_claim(void) return 0; } } - out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP); + out(LIRC_LP_CONTROL, LP_PSELECP | LP_PINITP); is_claimed = 1; return 1; } @@ -264,7 +264,7 @@ static void lirc_lirc_irq_handler(void *blah) init = 1; } - timeout = timer/10; /* timeout after 1/10 sec. */ + timeout = timer / 10; /* timeout after 1/10 sec. 
*/ signal = 1; level = lirc_get_timer(); do { @@ -286,15 +286,15 @@ static void lirc_lirc_irq_handler(void *blah) /* adjust value to usecs */ __u64 helper; - helper = ((__u64) signal)*1000000; + helper = ((__u64)signal) * 1000000; do_div(helper, timer); - signal = (long) helper; + signal = (long)helper; if (signal > LIRC_SFH506_DELAY) data = signal - LIRC_SFH506_DELAY; else data = 1; - rbuf_write(PULSE_BIT|data); /* pulse */ + rbuf_write(PULSE_BIT | data); /* pulse */ } lastkt = ktime_get(); #else @@ -331,7 +331,7 @@ static ssize_t lirc_read(struct file *filep, char __user *buf, size_t n, set_current_state(TASK_INTERRUPTIBLE); while (count < n) { if (rptr != wptr) { - if (copy_to_user(buf+count, &rbuf[rptr], + if (copy_to_user(buf + count, &rbuf[rptr], sizeof(int))) { result = -EFAULT; break; @@ -393,9 +393,9 @@ static ssize_t lirc_write(struct file *filep, const char __user *buf, size_t n, for (i = 0; i < count; i++) { __u64 helper; - helper = ((__u64) wbuf[i])*timer; + helper = ((__u64)wbuf[i]) * timer; do_div(helper, 1000000); - wbuf[i] = (int) helper; + wbuf[i] = (int)helper; } local_irq_save(flags); @@ -647,7 +647,7 @@ static int __init lirc_parallel_init(void) goto exit_device_put; pport = parport_find_base(io); - if (pport == NULL) { + if (!pport) { pr_notice("no port at %x found\n", io); result = -ENXIO; goto exit_device_put; @@ -656,7 +656,7 @@ static int __init lirc_parallel_init(void) pf, kf, lirc_lirc_irq_handler, 0, NULL); parport_put_port(pport); - if (ppdevice == NULL) { + if (!ppdevice) { pr_notice("parport_register_device() failed\n"); result = -ENXIO; goto exit_device_put; @@ -664,7 +664,7 @@ static int __init lirc_parallel_init(void) if (parport_claim(ppdevice) != 0) goto skip_init; is_claimed = 1; - out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP); + out(LIRC_LP_CONTROL, LP_PSELECP | LP_PINITP); #ifdef LIRC_TIMER if (debug) @@ -730,7 +730,7 @@ module_param(irq, int, S_IRUGO); MODULE_PARM_DESC(irq, "Interrupt (7 or 5)"); module_param(tx_mask, int, S_IRUGO); -MODULE_PARM_DESC(tx_maxk, "Transmitter mask (default: 0x01)"); +MODULE_PARM_DESC(tx_mask, "Transmitter mask (default: 0x01)"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable debugging messages"); diff --git a/drivers/staging/media/timb/Kconfig b/drivers/staging/media/timb/Kconfig new file mode 100644 index 000000000000..e413fecc1e67 --- /dev/null +++ b/drivers/staging/media/timb/Kconfig @@ -0,0 +1,11 @@ +config VIDEO_TIMBERDALE + tristate "Support for timberdale Video In/LogiWIN" + depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && HAS_DMA + depends on (MFD_TIMBERDALE && TIMB_DMA) || COMPILE_TEST + select VIDEO_ADV7180 + select VIDEOBUF_DMA_CONTIG + ---help--- + Add support for the Video In peripheral of the timberdale FPGA. + + This driver is deprecated and will be removed soon unless someone + starts the work to convert this driver to the vb2 framework.
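As an illustrative aside, not part of the patch: the ion call sites above reflect the reworked dma-buf CPU-access interface (see the include/linux/dma-buf.h hunk later in this series), where begin/end_cpu_access take only a DMA direction and the old start/len arguments are gone. A minimal in-kernel sketch, with a hypothetical helper name:

/*
 * Sketch only: bracket CPU reads of a dma-buf with the two-argument
 * begin/end calls; dma_buf_end_cpu_access() returns void at this point.
 */
#include <linux/dma-buf.h>

static int example_read_dmabuf(struct dma_buf *dmabuf)
{
        void *vaddr;
        int ret;

        ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
        if (ret)
                return ret;

        vaddr = dma_buf_kmap(dmabuf, 0);        /* map the first page */
        if (vaddr) {
                /* ... consume CPU-coherent data here ... */
                dma_buf_kunmap(dmabuf, 0, vaddr);
        }

        dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
        return 0;
}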
diff --git a/drivers/staging/media/timb/Makefile b/drivers/staging/media/timb/Makefile new file mode 100644 index 000000000000..4c989c23a0e0 --- /dev/null +++ b/drivers/staging/media/timb/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o diff --git a/drivers/media/platform/timblogiw.c b/drivers/staging/media/timb/timblogiw.c index 113c9f3c0b3e..113c9f3c0b3e 100644 --- a/drivers/media/platform/timblogiw.c +++ b/drivers/staging/media/timb/timblogiw.c diff --git a/include/drm/drmP.h b/include/drm/drmP.h index d7162cf1c3e1..3c8422c69572 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -283,6 +283,7 @@ struct drm_ioctl_desc { struct drm_pending_event { struct drm_event *event; struct list_head link; + struct list_head pending_link; struct drm_file *file_priv; pid_t pid; /* pid of requester, no guarantee it's valid by the time we deliver the event, for tracing only */ @@ -346,6 +347,7 @@ struct drm_file { struct list_head blobs; wait_queue_head_t event_wait; + struct list_head pending_event_list; struct list_head event_list; int event_space; @@ -919,15 +921,25 @@ extern long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags); - /* Device support (drm_fops.h) */ -extern int drm_open(struct inode *inode, struct file *filp); -extern ssize_t drm_read(struct file *filp, char __user *buffer, - size_t count, loff_t *offset); -extern int drm_release(struct inode *inode, struct file *filp); -extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv); - - /* Mapping support (drm_vm.h) */ -extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); +/* File Operations (drm_fops.c) */ +int drm_open(struct inode *inode, struct file *filp); +ssize_t drm_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset); +int drm_release(struct inode *inode, struct file *filp); +int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv); +unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); +int drm_event_reserve_init_locked(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_pending_event *p, + struct drm_event *e); +int drm_event_reserve_init(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_pending_event *p, + struct drm_event *e); +void drm_event_cancel_free(struct drm_device *dev, + struct drm_pending_event *p); +void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e); +void drm_send_event(struct drm_device *dev, struct drm_pending_event *e); /* Misc. 
IOCTL support (drm_ioctl.c) */ int drm_noop(struct drm_device *dev, void *data, diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index c65a212db77e..8c7fb3d0f9d0 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -307,6 +307,7 @@ struct drm_plane_helper_funcs; * @connectors_changed: connectors to this crtc have been updated * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors + * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders * @last_vblank_count: for helpers and drivers to capture the vblank of the * update to ensure framebuffer cleanup isn't done too early * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings @@ -341,6 +342,7 @@ struct drm_crtc_state { u32 plane_mask; u32 connector_mask; + u32 encoder_mask; /* last_vblank_count: for vblank waits before cleanup */ u32 last_vblank_count; @@ -2153,6 +2155,17 @@ struct drm_mode_config { list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ for_each_if ((plane_mask) & (1 << drm_plane_index(plane))) +/** + * drm_for_each_encoder_mask - iterate over encoders specified by bitmask + * @encoder: the loop cursor + * @dev: the DRM device + * @encoder_mask: bitmask of encoder indices + * + * Iterate over all encoders specified by bitmask. + */ +#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \ + list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \ + for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder))) #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) #define obj_to_connector(x) container_of(x, struct drm_connector, base) @@ -2225,6 +2238,7 @@ int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type, const char *name, ...); +extern unsigned int drm_encoder_index(struct drm_encoder *encoder); /** * drm_encoder_crtc_ok - can a given crtc drive a given encoder? 
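As an illustrative aside, not part of the patch: the new drm_for_each_encoder_mask() iterator pairs with the encoder_mask field added to struct drm_crtc_state above. A minimal sketch with a hypothetical helper name:

/* Sketch only: walk the encoders attached to an atomic CRTC state. */
#include <drm/drm_crtc.h>

static unsigned int example_count_attached_encoders(struct drm_crtc *crtc,
                                                    const struct drm_crtc_state *state)
{
        struct drm_encoder *encoder;
        unsigned int count = 0;

        drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask)
                count++;

        return count;
}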
@@ -2282,6 +2296,8 @@ extern void drm_property_destroy_user_blobs(struct drm_device *dev, extern bool drm_probe_ddc(struct i2c_adapter *adapter); extern struct edid *drm_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter); +extern struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, + struct i2c_adapter *adapter); extern struct edid *drm_edid_duplicate(const struct edid *edid); extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); extern void drm_mode_config_init(struct drm_device *dev); @@ -2482,6 +2498,8 @@ extern int drm_format_num_planes(uint32_t format); extern int drm_format_plane_cpp(uint32_t format, int plane); extern int drm_format_horz_chroma_subsampling(uint32_t format); extern int drm_format_vert_chroma_subsampling(uint32_t format); +extern int drm_format_plane_width(int width, uint32_t format, int plane); +extern int drm_format_plane_height(int height, uint32_t format, int plane); extern const char *drm_get_format_name(uint32_t format); extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, unsigned int supported_rotations); diff --git a/include/drm/drm_dp_aux_dev.h b/include/drm/drm_dp_aux_dev.h new file mode 100644 index 000000000000..1b76d990d8ab --- /dev/null +++ b/include/drm/drm_dp_aux_dev.h @@ -0,0 +1,62 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Rafael Antognolli <rafael.antognolli@intel.com> + * + */ + +#ifndef DRM_DP_AUX_DEV +#define DRM_DP_AUX_DEV + +#include <drm/drm_dp_helper.h> + +#ifdef CONFIG_DRM_DP_AUX_CHARDEV + +int drm_dp_aux_dev_init(void); +void drm_dp_aux_dev_exit(void); +int drm_dp_aux_register_devnode(struct drm_dp_aux *aux); +void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux); + +#else + +static inline int drm_dp_aux_dev_init(void) +{ + return 0; +} + +static inline void drm_dp_aux_dev_exit(void) +{ +} + +static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux) +{ + return 0; +} + +static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux) +{ +} + +#endif + +#endif diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index d8a40dff0d1d..062723bdcabe 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -219,6 +219,7 @@ struct drm_fb_helper { }; #ifdef CONFIG_DRM_FBDEV_EMULATION +int drm_fb_helper_modinit(void); void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, const struct drm_fb_helper_funcs *funcs); int drm_fb_helper_init(struct drm_device *dev, @@ -283,6 +284,11 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); #else +static inline int drm_fb_helper_modinit(void) +{ + return 0; +} + static inline void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, const struct drm_fb_helper_funcs *funcs) diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index a126a0d7aed4..b61c2d45192e 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -439,7 +439,7 @@ struct drm_encoder_helper_funcs { * can be modified by this callback and does not need to match mode. * * This function is used by both legacy CRTC helpers and atomic helpers. - * With atomic helpers it is optional. + * This hook is optional. 
* * NOTE: * diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h index 8544665ee4f4..3fd87b386ed7 100644 --- a/include/drm/drm_of.h +++ b/include/drm/drm_of.h @@ -1,9 +1,12 @@ #ifndef __DRM_OF_H__ #define __DRM_OF_H__ +#include <linux/of_graph.h> + struct component_master_ops; struct device; struct drm_device; +struct drm_encoder; struct device_node; #ifdef CONFIG_OF @@ -12,6 +15,9 @@ extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, extern int drm_of_component_probe(struct device *dev, int (*compare_of)(struct device *, void *), const struct component_master_ops *m_ops); +extern int drm_of_encoder_active_endpoint(struct device_node *node, + struct drm_encoder *encoder, + struct of_endpoint *endpoint); #else static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, struct device_node *port) @@ -26,6 +32,33 @@ drm_of_component_probe(struct device *dev, { return -EINVAL; } + +static inline int drm_of_encoder_active_endpoint(struct device_node *node, + struct drm_encoder *encoder, + struct of_endpoint *endpoint) +{ + return -EINVAL; +} #endif +static inline int drm_of_encoder_active_endpoint_id(struct device_node *node, + struct drm_encoder *encoder) +{ + struct of_endpoint endpoint; + int ret = drm_of_encoder_active_endpoint(node, encoder, + &endpoint); + + return ret ?: endpoint.id; +} + +static inline int drm_of_encoder_active_port_id(struct device_node *node, + struct drm_encoder *encoder) +{ + struct of_endpoint endpoint; + int ret = drm_of_encoder_active_endpoint(node, encoder, + &endpoint); + + return ret ?: endpoint.port; +} + #endif /* __DRM_OF_H__ */ diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h deleted file mode 100644 index cb65fa14acfc..000000000000 --- a/include/drm/exynos_drm.h +++ /dev/null @@ -1,101 +0,0 @@ -/* exynos_drm.h - * - * Copyright (c) 2011 Samsung Electronics Co., Ltd. - * Authors: - * Inki Dae <inki.dae@samsung.com> - * Joonyoung Shim <jy0922.shim@samsung.com> - * Seung-Woo Kim <sw0312.kim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ -#ifndef _EXYNOS_DRM_H_ -#define _EXYNOS_DRM_H_ - -#include <uapi/drm/exynos_drm.h> -#include <video/videomode.h> - -/** - * A structure for lcd panel information. - * - * @timing: default video mode for initializing - * @width_mm: physical size of lcd width. - * @height_mm: physical size of lcd height. - */ -struct exynos_drm_panel_info { - struct videomode vm; - u32 width_mm; - u32 height_mm; -}; - -/** - * Platform Specific Structure for DRM based FIMD. - * - * @panel: default panel info for initializing - * @default_win: default window layer number to be used for UI. - * @bpp: default bit per pixel. - */ -struct exynos_drm_fimd_pdata { - struct exynos_drm_panel_info panel; - u32 vidcon0; - u32 vidcon1; - unsigned int default_win; - unsigned int bpp; -}; - -/** - * Platform Specific Structure for DRM based HDMI. - * - * @hdmi_dev: device point to specific hdmi driver. - * @mixer_dev: device point to specific mixer driver. - * - * this structure is used for common hdmi driver and each device object - * would be used to access specific device driver(hdmi or mixer driver) - */ -struct exynos_drm_common_hdmi_pd { - struct device *hdmi_dev; - struct device *mixer_dev; -}; - -/** - * Platform Specific Structure for DRM based HDMI core. 
- * - * @is_v13: set if hdmi version 13 is. - * @cfg_hpd: function pointer to configure hdmi hotplug detection pin - * @get_hpd: function pointer to get value of hdmi hotplug detection pin - */ -struct exynos_drm_hdmi_pdata { - bool is_v13; - void (*cfg_hpd)(bool external); - int (*get_hpd)(void); -}; - -/** - * Platform Specific Structure for DRM based IPP. - * - * @inv_pclk: if set 1. invert pixel clock - * @inv_vsync: if set 1. invert vsync signal for wb - * @inv_href: if set 1. invert href signal - * @inv_hsync: if set 1. invert hsync signal for wb - */ -struct exynos_drm_ipp_pol { - unsigned int inv_pclk; - unsigned int inv_vsync; - unsigned int inv_href; - unsigned int inv_hsync; -}; - -/** - * Platform Specific Structure for DRM based FIMC. - * - * @pol: current hardware block polarity settings. - * @clk_rate: current hardware clock rate. - */ -struct exynos_drm_fimc_pdata { - struct exynos_drm_ipp_pol pol; - int clk_rate; -}; - -#endif /* _EXYNOS_DRM_H_ */ diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index f97020904717..9094599a1150 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -277,7 +277,9 @@ INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */ #define INTEL_SKL_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ @@ -296,7 +298,9 @@ #define INTEL_BXT_IDS(info) \ INTEL_VGA_DEVICE(0x0A84, info), \ INTEL_VGA_DEVICE(0x1A84, info), \ - INTEL_VGA_DEVICE(0x5A84, info) + INTEL_VGA_DEVICE(0x1A85, info), \ + INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \ + INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */ #define INTEL_KBL_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ diff --git a/include/media/i2c/tvp5150.h b/include/dt-bindings/media/tvp5150.h index 649908a25605..c852a35e916e 100644 --- a/include/media/i2c/tvp5150.h +++ b/include/dt-bindings/media/tvp5150.h @@ -18,16 +18,18 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -#ifndef _TVP5150_H_ -#define _TVP5150_H_ +#ifndef _DT_BINDINGS_MEDIA_TVP5150_H +#define _DT_BINDINGS_MEDIA_TVP5150_H /* TVP5150 HW inputs */ #define TVP5150_COMPOSITE0 0 #define TVP5150_COMPOSITE1 1 #define TVP5150_SVIDEO 2 +#define TVP5150_INPUT_NUM 3 + /* TVP5150 HW outputs */ #define TVP5150_NORMAL 0 #define TVP5150_BLACK_SCREEN 1 -#endif +#endif /* _DT_BINDINGS_MEDIA_TVP5150_H */ diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h new file mode 100644 index 000000000000..b2d32e01dfe4 --- /dev/null +++ b/include/linux/apple-gmux.h @@ -0,0 +1,50 @@ +/* + * apple-gmux.h - microcontroller built into dual GPU MacBook Pro & Mac Pro + * Copyright (C) 2015 Lukas Wunner <lukas@wunner.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef LINUX_APPLE_GMUX_H +#define LINUX_APPLE_GMUX_H + +#include <linux/acpi.h> + +#define GMUX_ACPI_HID "APP000B" + +#if IS_ENABLED(CONFIG_APPLE_GMUX) + +/** + * apple_gmux_present() - detect if gmux is built into the machine + * + * Drivers may use this to activate quirks specific to dual GPU MacBook Pros + * and Mac Pros, e.g. for deferred probing, runtime pm and backlight. + * + * Return: %true if gmux is present and the kernel was configured + * with CONFIG_APPLE_GMUX, %false otherwise. + */ +static inline bool apple_gmux_present(void) +{ + return acpi_dev_present(GMUX_ACPI_HID); +} + +#else /* !CONFIG_APPLE_GMUX */ + +static inline bool apple_gmux_present(void) +{ + return false; +} + +#endif /* !CONFIG_APPLE_GMUX */ + +#endif /* LINUX_APPLE_GMUX_H */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index f98bd7068d55..532108ea0c1c 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -54,7 +54,7 @@ struct dma_buf_attachment; * @release: release this buffer; to be called after the last dma_buf_put. * @begin_cpu_access: [optional] called before cpu access to invalidate cpu * caches and allocate backing storage (if not yet done) - * respectively pin the objet into memory. + * respectively pin the object into memory. * @end_cpu_access: [optional] called after cpu access to flush caches. * @kmap_atomic: maps a page from the buffer into kernel address * space, users may not block until the subsequent unmap call. @@ -93,10 +93,8 @@ struct dma_buf_ops { /* after final dma_buf_put() */ void (*release)(struct dma_buf *); - int (*begin_cpu_access)(struct dma_buf *, size_t, size_t, - enum dma_data_direction); - void (*end_cpu_access)(struct dma_buf *, size_t, size_t, - enum dma_data_direction); + int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); void *(*kmap_atomic)(struct dma_buf *, unsigned long); void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); void *(*kmap)(struct dma_buf *, unsigned long); @@ -224,9 +222,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *, enum dma_data_direction); void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); -int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len, +int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir); -void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len, +void dma_buf_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir); void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long); void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index 69e1d4a1f1b3..b39a5f3153bd 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -36,6 +36,26 @@ struct pci_dev; /** + * enum vga_switcheroo_handler_flags_t - handler flags bitmask + * @VGA_SWITCHEROO_CAN_SWITCH_DDC: whether the handler is able to switch the + * DDC lines separately. This signals to clients that they should call + * drm_get_edid_switcheroo() to probe the EDID + * @VGA_SWITCHEROO_NEEDS_EDP_CONFIG: whether the handler is unable to switch + * the AUX channel separately. This signals to clients that the active + * GPU needs to train the link and communicate the link parameters to the + * inactive GPU (mediated by vga_switcheroo). 
The inactive GPU may then + * skip the AUX handshake and set up its output with these pre-calibrated + * values (DisplayPort specification v1.1a, section 2.5.3.3) + * + * Handler flags bitmask. Used by handlers to declare their capabilities upon + * registering with vga_switcheroo. + */ +enum vga_switcheroo_handler_flags_t { + VGA_SWITCHEROO_CAN_SWITCH_DDC = (1 << 0), + VGA_SWITCHEROO_NEEDS_EDP_CONFIG = (1 << 1), +}; + +/** * enum vga_switcheroo_state - client power state * @VGA_SWITCHEROO_OFF: off * @VGA_SWITCHEROO_ON: on @@ -82,6 +102,9 @@ enum vga_switcheroo_client_id { * Mandatory. For muxless machines this should be a no-op. Returning 0 * denotes success, anything else failure (in which case the switch is * aborted) + * @switch_ddc: switch DDC lines to given client. + * Optional. Should return the previous DDC owner on success or a + * negative int on failure * @power_state: cut or reinstate power of given client. * Optional. The return value is ignored * @get_client_id: determine if given pci device is integrated or discrete GPU. @@ -93,6 +116,7 @@ enum vga_switcheroo_client_id { struct vga_switcheroo_handler { int (*init)(void); int (*switchto)(enum vga_switcheroo_client_id id); + int (*switch_ddc)(enum vga_switcheroo_client_id id); int (*power_state)(enum vga_switcheroo_client_id id, enum vga_switcheroo_state state); enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev); @@ -132,8 +156,12 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev, void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info); -int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler); +int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler, + enum vga_switcheroo_handler_flags_t handler_flags); void vga_switcheroo_unregister_handler(void); +enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void); +int vga_switcheroo_lock_ddc(struct pci_dev *pdev); +int vga_switcheroo_unlock_ddc(struct pci_dev *pdev); int vga_switcheroo_process_delayed_switch(void); @@ -150,11 +178,15 @@ static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} static inline int vga_switcheroo_register_client(struct pci_dev *dev, const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; } static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} -static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { return 0; } +static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler, + enum vga_switcheroo_handler_flags_t handler_flags) { return 0; } static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, enum vga_switcheroo_client_id id) { return 0; } static inline void vga_switcheroo_unregister_handler(void) {} +static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; } +static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; } +static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; } static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } diff --git a/include/media/media-device.h b/include/media/media-device.h index d3855898c3fc..165451bc3985 100644 --- a/include/media/media-device.h +++ 
b/include/media/media-device.h @@ -268,6 +268,10 @@ struct device; * struct media_device - Media device * @dev: Parent device * @devnode: Media device node + * @driver_name: Optional device driver name. If not set, calls to + * %MEDIA_IOC_DEVICE_INFO will return dev->driver->name. + * This is needed for USB drivers for example, as otherwise + * they'll all appear as if the driver name was "usb". * @model: Device model name * @serial: Device serial number (optional) * @bus_info: Unique and stable device location identifier @@ -303,6 +307,7 @@ struct media_device { struct media_devnode devnode; char model[32]; + char driver_name[32]; char serial[40]; char bus_info[32]; u32 hw_revision; diff --git a/include/media/rc-core.h b/include/media/rc-core.h index f6494709e230..c41dd7018fa8 100644 --- a/include/media/rc-core.h +++ b/include/media/rc-core.h @@ -60,6 +60,7 @@ enum rc_filter_type { /** * struct rc_dev - represents a remote control device * @dev: driver model's view of this device + * @initialized: true if the device init has completed * @sysfs_groups: sysfs attribute groups * @input_name: name of the input child device * @input_phys: physical path to the input child device @@ -121,6 +122,7 @@ enum rc_filter_type { */ struct rc_dev { struct device dev; + bool initialized; const struct attribute_group *sysfs_groups[5]; const char *input_name; const char *input_phys; diff --git a/include/media/tuner.h b/include/media/tuner.h index e5321fda5489..b3edc14e763f 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h @@ -20,14 +20,7 @@ #ifdef __KERNEL__ #include <linux/videodev2.h> - -/* Tuner PADs */ -/* FIXME: is this the right place for it? */ -enum tuner_pad_index { - TUNER_PAD_RF_INPUT, - TUNER_PAD_IF_OUTPUT, - TUNER_NUM_PADS -}; +#include <media/v4l2-mc.h> #define ADDR_UNSET (255) diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index da6fe9802fee..0bc9b35b8f3e 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -535,18 +535,6 @@ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl, u32 id, u8 max, u8 def, const s64 *qmenu_int); /** - * v4l2_ctrl_add_ctrl() - Add a control from another handler to this handler. - * @hdl: The control handler. - * @ctrl: The control to add. - * - * It will return NULL if it was unable to add the control reference. - * If the control already belonged to the handler, then it will do - * nothing and just return @ctrl. - */ -struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl, - struct v4l2_ctrl *ctrl); - -/** * v4l2_ctrl_add_handler() - Add all controls from handler @add to * handler @hdl. * @hdl: The control handler. diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h new file mode 100644 index 000000000000..79d84bb3573c --- /dev/null +++ b/include/media/v4l2-mc.h @@ -0,0 +1,172 @@ +/* + * v4l2-mc.h - Media Controller V4L2 types and prototypes + * + * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _V4L2_MC_H +#define _V4L2_MC_H + +#include <media/media-device.h> + +/** + * enum tuner_pad_index - tuner pad index for MEDIA_ENT_F_TUNER + * + * @TUNER_PAD_RF_INPUT: Radiofrequency (RF) sink pad, usually linked to an + * RF connector entity. + * @TUNER_PAD_OUTPUT: Tuner video output source pad. Contains the video + * chrominance and luminance or the whole bandwidth + * of the signal converted to an Intermediate Frequency + * (IF) or to baseband (on zero-IF tuners). + * @TUNER_PAD_AUD_OUT: Tuner audio output source pad. Tuners used to decode + * analog TV signals have an extra pad for audio output. + * Old tuners use an analog stage with a saw filter for + * the audio IF frequency. The output of the pad is, in + * this case, the audio IF, which should be decoded either + * by the bridge chipset (that's the case of cx2388x + * chipsets) or may require an external IF sound + * processor, like msp34xx. On modern silicon tuners, + * the audio IF decoder is usually incorporated in the + * tuner. In that case, the output of this pad is + * sampled audio data. + * @TUNER_NUM_PADS: Number of pads of the tuner. + */ +enum tuner_pad_index { + TUNER_PAD_RF_INPUT, + TUNER_PAD_OUTPUT, + TUNER_PAD_AUD_OUT, + TUNER_NUM_PADS +}; + +/** + * enum if_vid_dec_index - video IF-PLL pad index for + * MEDIA_ENT_F_IF_VID_DECODER + * + * @IF_VID_DEC_PAD_IF_INPUT: video Intermediate Frequency (IF) sink pad + * @IF_VID_DEC_PAD_OUT: IF-PLL video output source pad. Contains the + * video chrominance and luminance IF signals. + * @IF_VID_DEC_PAD_NUM_PADS: Number of pads of the video IF-PLL. + */ +enum if_vid_dec_pad_index { + IF_VID_DEC_PAD_IF_INPUT, + IF_VID_DEC_PAD_OUT, + IF_VID_DEC_PAD_NUM_PADS +}; + +/** + * enum if_aud_dec_index - audio/sound IF-PLL pad index for + * MEDIA_ENT_F_IF_AUD_DECODER + * + * @IF_AUD_DEC_PAD_IF_INPUT: audio Intermediate Frequency (IF) sink pad + * @IF_AUD_DEC_PAD_OUT: IF-PLL audio output source pad. Contains the + * audio sampled stream data, usually connected + * to the bridge bus via an Inter-IC Sound (I2S) + * bus. + * @IF_AUD_DEC_PAD_NUM_PADS: Number of pads of the audio IF-PLL. + */ +enum if_aud_dec_pad_index { + IF_AUD_DEC_PAD_IF_INPUT, + IF_AUD_DEC_PAD_OUT, + IF_AUD_DEC_PAD_NUM_PADS +}; + +/** + * enum demod_pad_index - analog TV pad index for MEDIA_ENT_F_ATV_DECODER + * + * @DEMOD_PAD_IF_INPUT: IF input sink pad. + * @DEMOD_PAD_VID_OUT: Video output source pad. + * @DEMOD_PAD_VBI_OUT: Vertical Blank Interface (VBI) output source pad. + * @DEMOD_NUM_PADS: Maximum number of output pads. + */ +enum demod_pad_index { + DEMOD_PAD_IF_INPUT, + DEMOD_PAD_VID_OUT, + DEMOD_PAD_VBI_OUT, + DEMOD_NUM_PADS +}; + +/* We don't need to include pci.h or usb.h here */ +struct pci_dev; +struct usb_device; + +#ifdef CONFIG_MEDIA_CONTROLLER +/** + * v4l2_mc_create_media_graph() - create Media Controller links in the graph. + * + * @mdev: pointer to the &media_device struct. + * + * Add links between the entities commonly found in PC consumer hardware on + * the V4L2 side: camera sensors, audio and video PLL-IF decoders, tuners, + * analog TV decoders and I/O entities (video, VBI and Software Defined Radio). + * NOTE: webcams are modelled in a very simple way: the sensor is + * connected directly to the I/O entity. All dirty details, like + * scaler and crop HW, are hidden. While such a mapping is enough for V4L2 + * interface-centric PC consumer hardware, V4L2 subdev-centric camera + * hardware should not use this routine, as it will not build the right graph.
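As an illustrative aside, not part of the patch: a bridge driver that has already registered its tuner, decoder and I/O entities with the media_device could let this helper wire up the default links before exposing the device. A minimal sketch with a hypothetical function name:

/* Sketch only: build the default graph, then register the media device. */
#include <media/v4l2-mc.h>

static int example_expose_media_device(struct media_device *mdev)
{
        int ret;

        ret = v4l2_mc_create_media_graph(mdev);
        if (ret)
                return ret;

        return media_device_register(mdev);
}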
+ */ +int v4l2_mc_create_media_graph(struct media_device *mdev); + +/** + * v4l2_mc_pci_media_device_init() - create and initialize a + * struct &media_device from a PCI device. + * + * @pci_dev: pointer to struct pci_dev + * @name: media device name. If %NULL, the routine will use the default + * name for the pci device, given by the pci_name() macro. + */ +struct media_device *v4l2_mc_pci_media_device_init(struct pci_dev *pci_dev, + const char *name); +/** + * __v4l2_mc_usb_media_device_init() - create and initialize a + * struct &media_device from a USB device. + * + * @udev: pointer to struct usb_device + * @board_name: media device name. If %NULL, the routine will use the USB + * product name, if available. + * @driver_name: name of the driver. If %NULL, the routine will use the name + * given by udev->dev->driver->name, which is usually the wrong + * thing to do. + * + * NOTE: It is better to call v4l2_mc_usb_media_device_init() instead, as + * that macro fills driver_name with %KBUILD_MODNAME. + */ +struct media_device *__v4l2_mc_usb_media_device_init(struct usb_device *udev, + const char *board_name, + const char *driver_name); + +#else +static inline int v4l2_mc_create_media_graph(struct media_device *mdev) +{ + return 0; +} + +static inline +struct media_device *v4l2_mc_pci_media_device_init(struct pci_dev *pci_dev, + char *name) +{ + return NULL; +} + +static inline +struct media_device *__v4l2_mc_usb_media_device_init(struct usb_device *udev, + char *board_name, + char *driver_name) +{ + return NULL; +} +#endif + +#define v4l2_mc_usb_media_device_init(udev, name) \ + __v4l2_mc_usb_media_device_init(udev, name, KBUILD_MODNAME) + +#endif diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index b273cf9ac047..11e2dfec0198 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -179,6 +179,8 @@ struct v4l2_subdev_io_pin_config { * for it to be warned when the value of a control changes. * * @unsubscribe_event: remove event subscription from the control framework. + * + * @registered_async: the subdevice has been registered asynchronously.
*/ struct v4l2_subdev_core_ops { int (*log_status)(struct v4l2_subdev *sd); @@ -211,6 +213,7 @@ struct v4l2_subdev_core_ops { struct v4l2_event_subscription *sub); int (*unsubscribe_event)(struct v4l2_subdev *sd, struct v4l2_fh *fh, struct v4l2_event_subscription *sub); + int (*registered_async)(struct v4l2_subdev *sd); }; /** diff --git a/include/media/videobuf2-dvb.h b/include/media/videobuf2-dvb.h index 5b64c9eac2c9..87b559024b4a 100644 --- a/include/media/videobuf2-dvb.h +++ b/include/media/videobuf2-dvb.h @@ -8,6 +8,10 @@ #include <dvb_frontend.h> #include <media/videobuf2-v4l2.h> + +/* We don't actually need to include media-device.h here */ +struct media_device; + /* * TODO: This header file should be replaced with videobuf2-core.h * Currently, vb2_thread is not a stuff of videobuf2-core, @@ -50,6 +54,7 @@ int vb2_dvb_register_bus(struct vb2_dvb_frontends *f, struct module *module, void *adapter_priv, struct device *device, + struct media_device *mdev, short *adapter_nr, int mfe_shared); diff --git a/include/media/vsp1.h b/include/media/vsp1.h new file mode 100644 index 000000000000..cc541753896f --- /dev/null +++ b/include/media/vsp1.h @@ -0,0 +1,33 @@ +/* + * vsp1.h -- R-Car VSP1 API + * + * Copyright (C) 2015 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef __MEDIA_VSP1_H__ +#define __MEDIA_VSP1_H__ + +#include <linux/types.h> + +struct device; +struct v4l2_rect; + +int vsp1_du_init(struct device *dev); + +int vsp1_du_setup_lif(struct device *dev, unsigned int width, + unsigned int height); + +int vsp1_du_atomic_begin(struct device *dev); +int vsp1_du_atomic_update(struct device *dev, unsigned int rpf, u32 pixelformat, + unsigned int pitch, dma_addr_t mem[2], + const struct v4l2_rect *src, + const struct v4l2_rect *dst); +int vsp1_du_atomic_flush(struct device *dev); + +#endif /* __MEDIA_VSP1_H__ */ diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index b4e92eb12044..a0ebfe7c9a28 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -669,6 +669,7 @@ struct drm_set_client_cap { __u64 value; }; +#define DRM_RDWR O_RDWR #define DRM_CLOEXEC O_CLOEXEC struct drm_prime_handle { __u32 handle; diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h index 312c67d744ae..d2a5bb1c22db 100644 --- a/include/uapi/drm/exynos_drm.h +++ b/include/uapi/drm/exynos_drm.h @@ -28,8 +28,8 @@ */ struct drm_exynos_gem_create { __u64 size; - unsigned int flags; - unsigned int handle; + __u32 flags; + __u32 handle; }; /** @@ -42,8 +42,8 @@ struct drm_exynos_gem_create { * be set by driver. */ struct drm_exynos_gem_info { - unsigned int handle; - unsigned int flags; + __u32 handle; + __u32 flags; __u64 size; }; @@ -56,8 +56,8 @@ struct drm_exynos_gem_info { * @edid: the edid data pointer from user side. */ struct drm_exynos_vidi_connection { - unsigned int connection; - unsigned int extensions; + __u32 connection; + __u32 extensions; __u64 edid; }; @@ -206,9 +206,9 @@ struct drm_exynos_ipp_prop_list { * @pos: property of image position(src-cropped,dst-scaler). 
*/ struct drm_exynos_ipp_config { - enum drm_exynos_ops_id ops_id; - enum drm_exynos_flip flip; - enum drm_exynos_degree degree; + __u32 ops_id; + __u32 flip; + __u32 degree; __u32 fmt; struct drm_exynos_sz sz; struct drm_exynos_pos pos; @@ -233,7 +233,7 @@ enum drm_exynos_ipp_cmd { */ struct drm_exynos_ipp_property { struct drm_exynos_ipp_config config[EXYNOS_DRM_OPS_MAX]; - enum drm_exynos_ipp_cmd cmd; + __u32 cmd; __u32 ipp_id; __u32 prop_id; __u32 refresh_rate; @@ -255,8 +255,8 @@ enum drm_exynos_ipp_buf_type { * @user_data: user data. */ struct drm_exynos_ipp_queue_buf { - enum drm_exynos_ops_id ops_id; - enum drm_exynos_ipp_buf_type buf_type; + __u32 ops_id; + __u32 buf_type; __u32 prop_id; __u32 buf_id; __u32 handle[EXYNOS_DRM_PLANAR_MAX]; @@ -280,7 +280,7 @@ enum drm_exynos_ipp_ctrl { */ struct drm_exynos_ipp_cmd_ctrl { __u32 prop_id; - enum drm_exynos_ipp_ctrl ctrl; + __u32 ctrl; }; #define DRM_EXYNOS_GEM_CREATE 0x00 diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index acf21026c78a..a5524cc95ff8 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -772,10 +772,12 @@ struct drm_i915_gem_execbuffer2 { #define I915_EXEC_HANDLE_LUT (1<<12) /** Used for switching BSD rings on the platforms with two BSD rings */ -#define I915_EXEC_BSD_MASK (3<<13) -#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */ -#define I915_EXEC_BSD_RING1 (1<<13) -#define I915_EXEC_BSD_RING2 (2<<13) +#define I915_EXEC_BSD_SHIFT (13) +#define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT) +/* default ping-pong mode */ +#define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT) +#define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT) +#define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT) /** Tell the kernel that the batchbuffer is processed by * the resource streamer. @@ -812,10 +814,35 @@ struct drm_i915_gem_busy { /** Handle of the buffer to check for busy */ __u32 handle; - /** Return busy status (1 if busy, 0 if idle). - * The high word is used to indicate on which rings the object - * currently resides: - * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc) + /** Return busy status + * + * A return of 0 implies that the object is idle (after + * having flushed any pending activity), and a non-zero return that + * the object is still in-flight on the GPU. (The GPU has not yet + * signaled completion for all pending requests that reference the + * object.) + * + * The returned dword is split into two fields to indicate both + * the engines on which the object is being read, and the + * engine on which it is currently being written (if any). + * + * The low word (bits 0:15) indicate if the object is being written + * to by any engine (there can only be one, as the GEM implicit + * synchronisation rules force writes to be serialised). Only the + * engine for the last write is reported. + * + * The high word (bits 16:31) are a bitmask of which engines are + * currently reading from the object. Multiple engines may be + * reading from the object simultaneously. + * + * The value of each engine is the same as specified in the + * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc. + * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to + * the I915_EXEC_RENDER engine for execution, and so it is never + * reported as active itself. Some hardware may have parallel + * execution engines, e.g. 
multiple media engines, which are + * mapped to the same identifier in the EXECBUFFER2 ioctl and + * so are not separately reported for busyness. */ __u32 busy; }; diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 81e6e0d1d360..254d3e92d18e 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -50,6 +50,8 @@ struct drm_msm_timespec { #define MSM_PARAM_GPU_ID 0x01 #define MSM_PARAM_GMEM_SIZE 0x02 #define MSM_PARAM_CHIP_ID 0x03 +#define MSM_PARAM_MAX_FREQ 0x04 +#define MSM_PARAM_TIMESTAMP 0x05 struct drm_msm_param { __u32 pipe; /* in, MSM_PIPE_x */ diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h new file mode 100644 index 000000000000..fb0dedb7c121 --- /dev/null +++ b/include/uapi/linux/dma-buf.h @@ -0,0 +1,40 @@ +/* + * Framework for buffer objects that can be shared across devices/subsystems. + * + * Copyright(C) 2015 Intel Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef _DMA_BUF_UAPI_H_ +#define _DMA_BUF_UAPI_H_ + +#include <linux/types.h> + +/* begin/end dma-buf functions used for userspace mmap. */ +struct dma_buf_sync { + __u64 flags; +}; + +#define DMA_BUF_SYNC_READ (1 << 0) +#define DMA_BUF_SYNC_WRITE (2 << 0) +#define DMA_BUF_SYNC_RW (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE) +#define DMA_BUF_SYNC_START (0 << 2) +#define DMA_BUF_SYNC_END (1 << 2) +#define DMA_BUF_SYNC_VALID_FLAGS_MASK \ + (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END) + +#define DMA_BUF_BASE 'b' +#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) + +#endif diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h index 1e3c8cb43bd7..6aac2f035a5d 100644 --- a/include/uapi/linux/media.h +++ b/include/uapi/linux/media.h @@ -72,21 +72,28 @@ struct media_device_info { #define MEDIA_ENT_F_DTV_NET_DECAP (MEDIA_ENT_F_BASE + 4) /* - * Connectors + * I/O entities */ -/* It is a responsibility of the entity drivers to add connectors and links */ -#define MEDIA_ENT_F_CONN_RF (MEDIA_ENT_F_BASE + 21) -#define MEDIA_ENT_F_CONN_SVIDEO (MEDIA_ENT_F_BASE + 22) -#define MEDIA_ENT_F_CONN_COMPOSITE (MEDIA_ENT_F_BASE + 23) -/* For internal test signal generators and other debug connectors */ -#define MEDIA_ENT_F_CONN_TEST (MEDIA_ENT_F_BASE + 24) +#define MEDIA_ENT_F_IO_DTV (MEDIA_ENT_F_BASE + 1001) +#define MEDIA_ENT_F_IO_VBI (MEDIA_ENT_F_BASE + 1002) +#define MEDIA_ENT_F_IO_SWRADIO (MEDIA_ENT_F_BASE + 1003) /* - * I/O entities + * Analog TV IF-PLL decoders + * + * It is a responsibility of the master/bridge drivers to create links + * for MEDIA_ENT_F_IF_VID_DECODER and MEDIA_ENT_F_IF_AUD_DECODER. 
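As an illustrative aside, not part of the patch: userspace is expected to bracket CPU access to an mmap()ed dma-buf with the DMA_BUF_IOCTL_SYNC ioctl declared in the include/uapi/linux/dma-buf.h hunk above. A minimal userspace sketch with hypothetical names:

/* Sketch only: START before touching the mapping, END when done. */
#include <linux/dma-buf.h>
#include <sys/ioctl.h>

static int example_cpu_access(int dmabuf_fd, void (*touch)(void *map), void *map)
{
        struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW };

        if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync))
                return -1;

        touch(map);                     /* read or write the mapping */

        sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
        return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) ? -1 : 0;
}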
+ */ +#define MEDIA_ENT_F_IF_VID_DECODER (MEDIA_ENT_F_BASE + 2001) +#define MEDIA_ENT_F_IF_AUD_DECODER (MEDIA_ENT_F_BASE + 2002) + +/* + * Connectors */ -#define MEDIA_ENT_F_IO_DTV (MEDIA_ENT_F_BASE + 31) -#define MEDIA_ENT_F_IO_VBI (MEDIA_ENT_F_BASE + 32) -#define MEDIA_ENT_F_IO_SWRADIO (MEDIA_ENT_F_BASE + 33) +/* It is the responsibility of the entity drivers to add connectors and links */ +#define MEDIA_ENT_F_CONN_RF (MEDIA_ENT_F_BASE + 10001) +#define MEDIA_ENT_F_CONN_SVIDEO (MEDIA_ENT_F_BASE + 10002) +#define MEDIA_ENT_F_CONN_COMPOSITE (MEDIA_ENT_F_BASE + 10003) /* * Don't touch on those. The ranges MEDIA_ENT_F_OLD_BASE and @@ -107,8 +114,12 @@ struct media_device_info { #define MEDIA_ENT_F_LENS (MEDIA_ENT_F_OLD_SUBDEV_BASE + 3) #define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4) /* - * It is a responsibility of the entity drivers to add connectors and links - * for the tuner entities. + * It is the responsibility of the master/bridge drivers to add connectors + * and links for MEDIA_ENT_F_TUNER. Note that some old tuners + * may require separate I2C chips to decode analog TV signals, + * when the master/bridge chipset doesn't have its own TV standard decoder. + * In such cases, the IF-PLL stage is mapped via one or two entities: + * MEDIA_ENT_F_IF_VID_DECODER and/or MEDIA_ENT_F_IF_AUD_DECODER. */ #define MEDIA_ENT_F_TUNER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 5) @@ -286,7 +297,7 @@ struct media_links_enum { * later, before the adding this API upstream. */ -#if 0 /* Let's postpone it to Kernel 4.6 */ + struct media_v2_entity { __u32 id; char name[64]; /* FIXME: move to a property? (RFC says so) */ @@ -351,7 +362,6 @@ static inline void __user *media_get_uptr(__u64 arg) { return (void __user *)(uintptr_t)arg; } -#endif /* ioctls */ @@ -359,9 +369,6 @@ static inline void __user *media_get_uptr(__u64 arg) #define MEDIA_IOC_ENUM_ENTITIES _IOWR('|', 0x01, struct media_entity_desc) #define MEDIA_IOC_ENUM_LINKS _IOWR('|', 0x02, struct media_links_enum) #define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc) - -#if 0 /* Let's postpone it to Kernel 4.6 */ #define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology) -#endif #endif /* __LINUX_MEDIA_H */ diff --git a/include/uapi/linux/v4l2-common.h b/include/uapi/linux/v4l2-common.h index 15273987093e..5b3f685a2d50 100644 --- a/include/uapi/linux/v4l2-common.h +++ b/include/uapi/linux/v4l2-common.h @@ -10,19 +10,43 @@ * Copyright (C) 2012 Nokia Corporation * Contact: Sakari Ailus <sakari.ailus@iki.fi> * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA + * Alternatively you can redistribute this file under the terms of the + * BSD license as stated below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. The names of its contributors may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 2d225bcdb831..b6a357a5f053 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -390,6 +390,7 @@ enum v4l2_mpeg_video_multi_slice_mode { #define V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (V4L2_CID_MPEG_BASE+226) #define V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (V4L2_CID_MPEG_BASE+227) #define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228) +#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229) #define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300) #define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301) @@ -912,8 +913,18 @@ enum v4l2_dv_rgb_range { V4L2_DV_RGB_RANGE_FULL = 2, }; +#define V4L2_CID_DV_TX_IT_CONTENT_TYPE (V4L2_CID_DV_CLASS_BASE + 6) +enum v4l2_dv_it_content_type { + V4L2_DV_IT_CONTENT_TYPE_GRAPHICS = 0, + V4L2_DV_IT_CONTENT_TYPE_PHOTO = 1, + V4L2_DV_IT_CONTENT_TYPE_CINEMA = 2, + V4L2_DV_IT_CONTENT_TYPE_GAME = 3, + V4L2_DV_IT_CONTENT_TYPE_NO_ITC = 4, +}; + #define V4L2_CID_DV_RX_POWER_PRESENT (V4L2_CID_DV_CLASS_BASE + 100) #define V4L2_CID_DV_RX_RGB_RANGE (V4L2_CID_DV_CLASS_BASE + 101) +#define V4L2_CID_DV_RX_IT_CONTENT_TYPE (V4L2_CID_DV_CLASS_BASE + 102) #define V4L2_CID_FM_RX_CLASS_BASE (V4L2_CTRL_CLASS_FM_RX | 0x900) #define V4L2_CID_FM_RX_CLASS (V4L2_CTRL_CLASS_FM_RX | 1) diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 14cd5ebfee6d..466458422385 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -546,6 +546,10 @@ struct v4l2_pix_format { /* three non contiguous planes - Y, Cb, Cr */ #define V4L2_PIX_FMT_YUV420M v4l2_fourcc('Y', 'M', '1', '2') /* 12 YUV420 planar */ #define V4L2_PIX_FMT_YVU420M v4l2_fourcc('Y', 'M', '2', '1') /* 12 YVU420 planar */ +#define V4L2_PIX_FMT_YUV422M v4l2_fourcc('Y', 'M', '1', '6') /* 16 YUV422 planar */ +#define V4L2_PIX_FMT_YVU422M v4l2_fourcc('Y', 'M', '6', '1') /* 16 YVU422 planar */ +#define V4L2_PIX_FMT_YUV444M v4l2_fourcc('Y', 'M', '2', '4') /* 24 YUV444 planar */ +#define V4L2_PIX_FMT_YVU444M v4l2_fourcc('Y', 'M', '4', '2') /* 24 YVU444 planar */ /* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */ #define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ diff --git a/mm/swapfile.c b/mm/swapfile.c index d2c37365e2d6..38884383ebac 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -48,6 +48,12 @@ static sector_t map_swap_entry(swp_entry_t, struct block_device**); DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; atomic_long_t nr_swap_pages; +/* + * Some modules use swappable objects and may try to swap them out under + * memory pressure (via the shrinker). Before doing so, they may wish to + * check to see if any swap space is available. + */ +EXPORT_SYMBOL_GPL(nr_swap_pages); /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */ long total_swap_pages; static int least_priority;
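As an illustrative aside, not part of the patch: with nr_swap_pages now exported, a module that swaps out shrinker-managed objects can gate that path on swap availability via the existing get_nr_swap_pages() helper. A minimal sketch with a hypothetical function name:

/* Sketch only: check whether any swap space is currently available. */
#include <linux/swap.h>

static bool example_swap_space_available(void)
{
        return get_nr_swap_pages() > 0;
}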