Diffstat (limited to 'drivers/gpu/drm/i915/gt')
107 files changed, 41339 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile new file mode 100644 index 000000000000..7e73aa587967 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/Makefile @@ -0,0 +1,5 @@ +# For building individual subdir files on the command line +subdir-ccflags-y += -I$(srctree)/$(src)/.. + +# Extra header tests +header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/gt/gen6_renderstate.c b/drivers/gpu/drm/i915/gt/gen6_renderstate.c new file mode 100644 index 000000000000..11c8e7b3dd7c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen6_renderstate.c @@ -0,0 +1,315 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Generated by: intel-gpu-tools-1.8-220-g01153e7 + */ + +#include "intel_renderstate.h" + +static const u32 gen6_null_state_relocs[] = { + 0x00000020, + 0x00000024, + 0x0000002c, + 0x000001e0, + 0x000001e4, + -1, +}; + +static const u32 gen6_null_state_batch[] = { + 0x69040000, + 0x790d0001, + 0x00000000, + 0x00000000, + 0x78180000, + 0x00000001, + 0x61010008, + 0x00000000, + 0x00000001, /* reloc */ + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000001, + 0x61020000, + 0x00000000, + 0x78050001, + 0x00000018, + 0x00000000, + 0x780d1002, + 0x00000000, + 0x00000000, + 0x00000420, + 0x78150003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78100004, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78160003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78110005, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78120002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78170003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79050005, + 0xe0040000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79100000, + 0x00000000, + 0x79000002, + 0xffffffff, + 0x00000000, + 0x00000000, + 0x780e0002, + 0x00000441, + 0x00000401, + 0x00000401, + 0x78021002, + 0x00000000, + 0x00000000, + 0x00000400, + 0x78130012, + 0x00400810, + 0x00000000, + 0x20000000, + 0x04000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78140007, + 0x00000280, + 0x08080000, + 0x00000000, + 0x00060000, + 0x4e080002, + 0x00100400, + 0x00000000, + 0x00000000, + 0x78090005, + 0x02000000, + 0x22220000, + 0x02f60000, + 0x11330000, + 0x02850004, + 0x11220000, + 0x78011002, + 0x00000000, + 0x00000000, + 0x00000200, + 0x78080003, + 0x00002000, + 0x00000448, /* reloc */ + 0x00000448, /* reloc */ + 0x00000000, + 0x05000000, /* cmds end */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000220, /* state start */ + 0x00000240, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0060005a, + 0x204077be, + 0x000000c0, + 0x008d0040, + 0x0060005a, + 0x206077be, + 0x000000c0, + 0x008d0080, + 0x0060005a, + 0x208077be, + 0x000000d0, + 0x008d0040, + 0x0060005a, + 0x20a077be, + 0x000000d0, + 0x008d0080, + 0x00000201, + 0x20080061, + 0x00000000, + 0x00000000, + 0x00600001, + 0x20200022, + 0x008d0000, + 0x00000000, + 0x02800031, + 0x21c01cc9, + 0x00000020, + 0x0a8a0001, + 0x00600001, + 0x204003be, + 0x008d01c0, + 0x00000000, + 0x00600001, + 0x206003be, + 0x008d01e0, + 0x00000000, + 0x00600001, + 0x208003be, + 0x008d0200, + 0x00000000, + 0x00600001, + 0x20a003be, + 0x008d0220, + 0x00000000, + 0x00600001, + 0x20c003be, + 0x008d0240, + 0x00000000, + 0x00600001, + 0x20e003be, + 0x008d0260, + 0x00000000, + 0x00600001, + 0x210003be, + 0x008d0280, + 0x00000000, + 0x00600001, + 0x212003be, + 0x008d02a0, + 0x00000000, + 0x05800031, + 0x24001cc8, + 0x00000040, + 0x90019000, + 0x0000007e, + 0x00000000, + 0x00000000, + 
0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x30000000, + 0x00000124, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0xf99a130c, + 0x799a130c, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x80000031, + 0x00000003, + 0x00000000, /* state end */ +}; + +RO_RENDERSTATE(6); diff --git a/drivers/gpu/drm/i915/gt/gen7_renderstate.c b/drivers/gpu/drm/i915/gt/gen7_renderstate.c new file mode 100644 index 000000000000..655180646152 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen7_renderstate.c @@ -0,0 +1,279 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Generated by: intel-gpu-tools-1.8-220-g01153e7 + */ + +#include "intel_renderstate.h" + +static const u32 gen7_null_state_relocs[] = { + 0x0000000c, + 0x00000010, + 0x00000018, + 0x000001ec, + -1, +}; + +static const u32 gen7_null_state_batch[] = { + 0x69040000, + 0x61010008, + 0x00000000, + 0x00000001, /* reloc */ + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000001, + 0x790d0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78180000, + 0x00000001, + 0x79160000, + 0x00000008, + 0x78300000, + 0x02010040, + 0x78310000, + 0x04000000, + 0x78320000, + 0x04000000, + 0x78330000, + 0x02000000, + 0x78100004, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781b0005, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781c0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781d0004, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78110005, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78120002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78210000, + 0x00000000, + 0x78130005, + 0x00000000, + 0x20000000, + 0x04000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78140001, + 0x20000800, + 0x00000000, + 0x781e0001, + 0x00000000, + 0x00000000, + 0x78050005, + 0xe0040000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78040001, + 0x00000000, + 0x00000000, + 0x78240000, + 0x00000240, + 0x78230000, + 0x00000260, + 0x782f0000, + 0x00000280, + 0x781f000c, + 0x00400810, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78200006, + 0x000002c0, + 0x08080000, + 0x00000000, + 0x28000402, + 0x00060000, + 0x00000000, + 0x00000000, + 0x78090005, + 0x02000000, + 0x22220000, + 0x02f60000, + 0x11230000, + 0x02f60004, + 0x11230000, + 0x78080003, + 0x00006008, + 0x00000340, /* reloc */ + 0xffffffff, + 0x00000000, + 0x782a0000, + 0x00000360, + 0x79000002, + 0xffffffff, + 0x00000000, + 0x00000000, + 0x7b000005, + 0x0000000f, + 0x00000003, + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000000, + 0x05000000, /* cmds end */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000031, /* state start */ + 0x00000003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0xf99a130c, + 0x799a130c, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000492, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0080005a, + 0x2e2077bd, + 0x000000c0, + 0x008d0040, + 0x0080005a, + 0x2e6077bd, + 0x000000d0, + 0x008d0040, + 0x02800031, + 0x21801fa9, + 0x008d0e20, + 0x08840001, + 0x00800001, + 0x2e2003bd, + 0x008d0180, + 0x00000000, + 0x00800001, + 0x2e6003bd, + 0x008d01c0, + 0x00000000, + 0x00800001, + 0x2ea003bd, + 0x008d0200, + 0x00000000, + 0x00800001, + 0x2ee003bd, + 0x008d0240, + 0x00000000, + 0x05800031, + 0x20001fa8, + 0x008d0e20, + 0x90031000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000380, + 0x000003a0, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 
0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, /* state end */ +}; + +RO_RENDERSTATE(7); diff --git a/drivers/gpu/drm/i915/gt/gen8_renderstate.c b/drivers/gpu/drm/i915/gt/gen8_renderstate.c new file mode 100644 index 000000000000..95288a34c15d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen8_renderstate.c @@ -0,0 +1,983 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Generated by: intel-gpu-tools-1.8-220-g01153e7 + */ + +#include "intel_renderstate.h" + +static const u32 gen8_null_state_relocs[] = { + 0x00000798, + 0x000007a4, + 0x000007ac, + 0x000007bc, + -1, +}; + +static const u32 gen8_null_state_batch[] = { + 0x7a000004, + 0x01000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x69040000, + 0x78140000, + 0x04000000, + 0x7820000a, + 0x00000000, + 0x00000000, + 0x80000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78130002, + 0x00000000, + 0x00000000, + 0x02001808, + 0x781f0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78510009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78100007, + 0x00000000, + 0x00000000, + 0x00010000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781b0007, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000800, + 0x00000000, + 0x78110008, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781e0003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781d0007, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78120002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78500003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781c0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x780c0000, + 0x00000000, + 0x78520003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78300000, + 0x08010040, + 0x78310000, + 0x1e000000, + 0x78320000, + 0x1e000000, + 0x78330000, + 0x1e000000, + 0x79190002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x791a0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x791b0002, + 0x00000000, + 0x00000000, + 
0x00000000, + 0x79120000, + 0x00000000, + 0x79130000, + 0x00000000, + 0x79140000, + 0x00000000, + 0x79150000, + 0x00000000, + 0x79160000, + 0x00000000, + 0x78150009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78190009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781a0009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78160009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78170009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78490001, + 0x00000000, + 0x00000000, + 0x784a0000, + 0x00000000, + 0x784b0000, + 0x00000004, + 0x79170101, + 0x00000000, + 0x00000080, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, 
+ 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x20000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x40000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x60000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x6101000e, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00001001, + 0x00001001, + 0x00000001, + 0x00001001, + 0x61020001, + 0x00000000, + 0x00000000, + 0x79000002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78050006, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0x40000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0x80000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0xc0000000, + 0x00000000, + 0x00000000, + 0x79080001, + 0x00000000, + 0x00000000, + 0x790a0001, + 0x00000000, + 0x00000000, + 0x78060003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78070003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78040001, + 0x00000000, + 0x00000000, + 0x79110000, + 0x00000000, + 0x780d0000, + 0x00000000, + 0x79060000, + 0x00000000, + 0x7907001f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x7902000f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x790c000f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x780a0003, + 
0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78080083, + 0x00004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x04004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x08004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x10004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x14004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x18004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x1c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x20004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x24004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x28004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x2c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x30004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x34004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x38004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x3c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x40004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x44004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x48004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x4c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x50004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x54004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x58004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x5c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x60004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x64004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x68004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x6c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x70004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x74004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x7c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x80004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78090043, + 0x02000000, + 0x22220000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x680b0001, + 0x78260000, + 0x00000000, + 0x78270000, + 0x00000000, + 0x78280000, + 0x00000000, + 0x78290000, + 0x00000000, + 0x782a0000, + 0x00000000, + 0x780e0000, + 0x00000dc1, + 0x78240000, + 0x00000e01, + 0x784f0000, + 0x80000100, + 0x784d0000, + 0x40000000, + 0x782b0000, + 0x00000000, + 0x782c0000, + 0x00000000, + 0x782d0000, + 0x00000000, + 0x782e0000, + 0x00000000, + 0x782f0000, + 0x00000000, + 0x780f0000, + 0x00000000, + 0x78230000, + 0x00000e60, + 0x78210000, + 0x00000e80, + 0x7b000005, + 0x00000004, + 0x00000001, + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000000, + 0x05000000, /* cmds end */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, 
+ 0x00000000, + 0x00000000, + 0x00000000, /* state start */ + 0x00000000, + 0x3f800000, + 0x3f800000, + 0x3f800000, + 0x3f800000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, /* state end */ +}; + +RO_RENDERSTATE(8); diff --git a/drivers/gpu/drm/i915/gt/gen9_renderstate.c b/drivers/gpu/drm/i915/gt/gen9_renderstate.c new file mode 100644 index 000000000000..7d3ac02f0177 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen9_renderstate.c @@ -0,0 +1,999 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Generated by: intel-gpu-tools-1.19-177-g68e2eab2 + */ + +#include "intel_renderstate.h" + +static const u32 gen9_null_state_relocs[] = { + 0x000007a8, + 0x000007b4, + 0x000007bc, + 0x000007cc, + -1, +}; + +static const u32 gen9_null_state_batch[] = { + 0x7a000004, + 0x01000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x69040300, + 0x78140000, + 0x04000000, + 0x7820000a, + 0x00000000, + 0x00000000, + 0x80000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78130002, + 0x00000000, + 0x00000000, + 0x02001808, + 0x781f0004, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78510009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78100007, + 0x00000000, + 0x00000000, + 0x00010000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781b0007, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000800, + 0x00000000, + 0x78110008, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781e0003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781d0009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78120002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78500003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781c0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x780c0000, + 0x00000000, + 0x78520003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78300000, + 0x08010040, + 0x78310000, + 0x1e000000, + 0x78320000, + 0x1e000000, + 0x78330000, + 0x1e000000, + 0x79190002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x791a0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x791b0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79120000, + 0x00000000, + 0x79130000, + 0x00000000, + 0x79140000, + 0x00000000, + 0x79150000, + 0x00000000, + 0x79160000, + 0x00000000, + 0x78150009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78190009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781a0009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78160009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78170009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78490001, + 0x00000000, + 0x00000000, + 0x784a0000, + 0x00000000, + 0x784b0000, + 0x00000004, + 0x79170101, + 0x00000000, + 0x00000080, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 
0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x20000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x40000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x60000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, 
+ 0x61010011, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00001001, + 0x00001001, + 0x00000001, + 0x00001001, + 0x00000000, + 0x00000000, + 0x00000000, + 0x61020001, + 0x00000000, + 0x00000000, + 0x79000002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78050006, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0x40000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0x80000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0xc0000000, + 0x00000000, + 0x00000000, + 0x79080001, + 0x00000000, + 0x00000000, + 0x790a0001, + 0x00000000, + 0x00000000, + 0x78060003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78070003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78040001, + 0x00000000, + 0x00000000, + 0x79110000, + 0x00000000, + 0x780d0000, + 0x00000000, + 0x79060000, + 0x00000000, + 0x7907001f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x7902000f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x790c000f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x780a0003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78080083, + 0x00004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x04004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x08004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x10004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x14004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x18004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x1c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x20004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x24004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x28004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x2c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x30004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x34004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x38004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x3c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x40004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x44004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x48004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x4c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x50004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x54004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x58004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x5c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x60004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x64004000, + 0x00000000, + 
0x00000000, + 0x00000000, + 0x68004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x6c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x70004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x74004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x7c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x80004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78090043, + 0x02000000, + 0x22220000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78550003, + 0x0000000f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x680b0001, + 0x780e0000, + 0x00000e01, + 0x78240000, + 0x00000e41, + 0x784f0000, + 0x80000100, + 0x784d0000, + 0x40000000, + 0x782b0000, + 0x00000000, + 0x782c0000, + 0x00000000, + 0x782d0000, + 0x00000000, + 0x782e0000, + 0x00000000, + 0x782f0000, + 0x00000000, + 0x780f0000, + 0x00000000, + 0x78230000, + 0x00000ea0, + 0x78210000, + 0x00000ec0, + 0x78260000, + 0x00000000, + 0x78270000, + 0x00000000, + 0x78280000, + 0x00000000, + 0x78290000, + 0x00000000, + 0x782a0000, + 0x00000000, + 0x7b000005, + 0x00000004, + 0x00000001, + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000000, + 0x05000000, /* cmds end */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, /* state start */ + 0x00000000, + 0x3f800000, + 0x3f800000, + 0x3f800000, + 0x3f800000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, /* state end */ +}; + +RO_RENDERSTATE(9); diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c new file mode 100644 index 000000000000..55317081d48b --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -0,0 +1,364 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software 
and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include <linux/kthread.h> +#include <trace/events/dma_fence.h> +#include <uapi/linux/sched/types.h> + +#include "i915_drv.h" +#include "i915_trace.h" + +static void irq_enable(struct intel_engine_cs *engine) +{ + if (!engine->irq_enable) + return; + + /* Caller disables interrupts */ + spin_lock(&engine->gt->irq_lock); + engine->irq_enable(engine); + spin_unlock(&engine->gt->irq_lock); +} + +static void irq_disable(struct intel_engine_cs *engine) +{ + if (!engine->irq_disable) + return; + + /* Caller disables interrupts */ + spin_lock(&engine->gt->irq_lock); + engine->irq_disable(engine); + spin_unlock(&engine->gt->irq_lock); +} + +static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) +{ + lockdep_assert_held(&b->irq_lock); + + GEM_BUG_ON(!b->irq_enabled); + if (!--b->irq_enabled) + irq_disable(container_of(b, + struct intel_engine_cs, + breadcrumbs)); + + b->irq_armed = false; +} + +void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + unsigned long flags; + + if (!b->irq_armed) + return; + + spin_lock_irqsave(&b->irq_lock, flags); + if (b->irq_armed) + __intel_breadcrumbs_disarm_irq(b); + spin_unlock_irqrestore(&b->irq_lock, flags); +} + +static inline bool __request_completed(const struct i915_request *rq) +{ + return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno); +} + +__maybe_unused static bool +check_signal_order(struct intel_context *ce, struct i915_request *rq) +{ + if (!list_is_last(&rq->signal_link, &ce->signals) && + i915_seqno_passed(rq->fence.seqno, + list_next_entry(rq, signal_link)->fence.seqno)) + return false; + + if (!list_is_first(&rq->signal_link, &ce->signals) && + i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno, + rq->fence.seqno)) + return false; + + return true; +} + +static bool +__dma_fence_signal(struct dma_fence *fence) +{ + return !test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags); +} + +static void +__dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp) +{ + fence->timestamp = timestamp; + set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); + trace_dma_fence_signaled(fence); +} + +static void +__dma_fence_signal__notify(struct dma_fence *fence, + const struct list_head *list) +{ + struct dma_fence_cb *cur, *tmp; + + lockdep_assert_held(fence->lock); + + list_for_each_entry_safe(cur, tmp, list, node) { + INIT_LIST_HEAD(&cur->node); + cur->func(fence, cur); + } +} + +void 
intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + const ktime_t timestamp = ktime_get(); + struct intel_context *ce, *cn; + struct list_head *pos, *next; + unsigned long flags; + LIST_HEAD(signal); + + spin_lock_irqsave(&b->irq_lock, flags); + + if (b->irq_armed && list_empty(&b->signalers)) + __intel_breadcrumbs_disarm_irq(b); + + list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) { + GEM_BUG_ON(list_empty(&ce->signals)); + + list_for_each_safe(pos, next, &ce->signals) { + struct i915_request *rq = + list_entry(pos, typeof(*rq), signal_link); + + GEM_BUG_ON(!check_signal_order(ce, rq)); + + if (!__request_completed(rq)) + break; + + GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, + &rq->fence.flags)); + clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); + + if (!__dma_fence_signal(&rq->fence)) + continue; + + /* + * Queue for execution after dropping the signaling + * spinlock as the callback chain may end up adding + * more signalers to the same context or engine. + */ + i915_request_get(rq); + list_add_tail(&rq->signal_link, &signal); + } + + /* + * We process the list deletion in bulk, only using a list_add + * (not list_move) above but keeping the status of + * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit. + */ + if (!list_is_first(pos, &ce->signals)) { + /* Advance the list to the first incomplete request */ + __list_del_many(&ce->signals, pos); + if (&ce->signals == pos) /* now empty */ + list_del_init(&ce->signal_link); + } + } + + spin_unlock_irqrestore(&b->irq_lock, flags); + + list_for_each_safe(pos, next, &signal) { + struct i915_request *rq = + list_entry(pos, typeof(*rq), signal_link); + struct list_head cb_list; + + spin_lock_irqsave(&rq->lock, flags); + list_replace(&rq->fence.cb_list, &cb_list); + __dma_fence_signal__timestamp(&rq->fence, timestamp); + __dma_fence_signal__notify(&rq->fence, &cb_list); + spin_unlock_irqrestore(&rq->lock, flags); + + i915_request_put(rq); + } +} + +static void signal_irq_work(struct irq_work *work) +{ + struct intel_engine_cs *engine = + container_of(work, typeof(*engine), breadcrumbs.irq_work); + + intel_engine_breadcrumbs_irq(engine); +} + +static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) +{ + struct intel_engine_cs *engine = + container_of(b, struct intel_engine_cs, breadcrumbs); + + lockdep_assert_held(&b->irq_lock); + if (b->irq_armed) + return; + + /* + * The breadcrumb irq will be disarmed on the interrupt after the + * waiters are signaled. This gives us a single interrupt window in + * which we can add a new waiter and avoid the cost of re-enabling + * the irq. + */ + b->irq_armed = true; + + /* + * Since we are waiting on a request, the GPU should be busy + * and should have its own rpm reference. This is tracked + * by i915->gt.awake, we can forgo holding our own wakref + * for the interrupt as before i915->gt.awake is released (when + * the driver is idle) we disarm the breadcrumbs. 
+ */ + + if (!b->irq_enabled++) + irq_enable(engine); +} + +void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + + spin_lock_init(&b->irq_lock); + INIT_LIST_HEAD(&b->signalers); + + init_irq_work(&b->irq_work, signal_irq_work); +} + +void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + unsigned long flags; + + spin_lock_irqsave(&b->irq_lock, flags); + + if (b->irq_enabled) + irq_enable(engine); + else + irq_disable(engine); + + spin_unlock_irqrestore(&b->irq_lock, flags); +} + +void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) +{ +} + +bool i915_request_enable_breadcrumb(struct i915_request *rq) +{ + lockdep_assert_held(&rq->lock); + + if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) { + struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; + struct intel_context *ce = rq->hw_context; + struct list_head *pos; + + spin_lock(&b->irq_lock); + GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); + + __intel_breadcrumbs_arm_irq(b); + + /* + * We keep the seqno in retirement order, so we can break + * inside intel_engine_breadcrumbs_irq as soon as we've passed + * the last completed request (or seen a request that hasn't + * event started). We could iterate the timeline->requests list, + * but keeping a separate signalers_list has the advantage of + * hopefully being much smaller than the full list and so + * provides faster iteration and detection when there are no + * more interrupts required for this context. + * + * We typically expect to add new signalers in order, so we + * start looking for our insertion point from the tail of + * the list. + */ + list_for_each_prev(pos, &ce->signals) { + struct i915_request *it = + list_entry(pos, typeof(*it), signal_link); + + if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno)) + break; + } + list_add(&rq->signal_link, pos); + if (pos == &ce->signals) /* catch transitions from empty list */ + list_move_tail(&ce->signal_link, &b->signalers); + GEM_BUG_ON(!check_signal_order(ce, rq)); + + set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); + spin_unlock(&b->irq_lock); + } + + return !__request_completed(rq); +} + +void i915_request_cancel_breadcrumb(struct i915_request *rq) +{ + struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; + + lockdep_assert_held(&rq->lock); + + /* + * We must wait for b->irq_lock so that we know the interrupt handler + * has released its reference to the intel_context and has completed + * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if + * required). + */ + spin_lock(&b->irq_lock); + if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) { + struct intel_context *ce = rq->hw_context; + + list_del(&rq->signal_link); + if (list_empty(&ce->signals)) + list_del_init(&ce->signal_link); + + clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); + } + spin_unlock(&b->irq_lock); +} + +void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, + struct drm_printer *p) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct intel_context *ce; + struct i915_request *rq; + + if (list_empty(&b->signalers)) + return; + + drm_printf(p, "Signals:\n"); + + spin_lock_irq(&b->irq_lock); + list_for_each_entry(ce, &b->signalers, signal_link) { + list_for_each_entry(rq, &ce->signals, signal_link) { + drm_printf(p, "\t[%llx:%llx%s] @ %dms\n", + rq->fence.context, rq->fence.seqno, + i915_request_completed(rq) ? "!" 
: + i915_request_started(rq) ? "*" : + "", + jiffies_to_msecs(jiffies - rq->emitted_jiffies)); + } + } + spin_unlock_irq(&b->irq_lock); +} diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c new file mode 100644 index 000000000000..ee9d2bcd2c13 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -0,0 +1,353 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "gem/i915_gem_context.h" +#include "gem/i915_gem_pm.h" + +#include "i915_drv.h" +#include "i915_globals.h" + +#include "intel_context.h" +#include "intel_engine.h" +#include "intel_engine_pm.h" +#include "intel_ring.h" + +static struct i915_global_context { + struct i915_global base; + struct kmem_cache *slab_ce; +} global; + +static struct intel_context *intel_context_alloc(void) +{ + return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL); +} + +void intel_context_free(struct intel_context *ce) +{ + kmem_cache_free(global.slab_ce, ce); +} + +struct intel_context * +intel_context_create(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct intel_context *ce; + + ce = intel_context_alloc(); + if (!ce) + return ERR_PTR(-ENOMEM); + + intel_context_init(ce, ctx, engine); + return ce; +} + +int __intel_context_do_pin(struct intel_context *ce) +{ + int err; + + if (mutex_lock_interruptible(&ce->pin_mutex)) + return -EINTR; + + if (likely(!atomic_read(&ce->pin_count))) { + intel_wakeref_t wakeref; + + if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) { + err = ce->ops->alloc(ce); + if (unlikely(err)) + goto err; + + __set_bit(CONTEXT_ALLOC_BIT, &ce->flags); + } + + err = 0; + with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref) + err = ce->ops->pin(ce); + if (err) + goto err; + + GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n", + ce->engine->name, ce->timeline->fence_context, + ce->ring->head, ce->ring->tail); + + i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */ + + smp_mb__before_atomic(); /* flush pin before it is visible */ + } + + atomic_inc(&ce->pin_count); + GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */ + + mutex_unlock(&ce->pin_mutex); + return 0; + +err: + mutex_unlock(&ce->pin_mutex); + return err; +} + +void intel_context_unpin(struct intel_context *ce) +{ + if (likely(atomic_add_unless(&ce->pin_count, -1, 1))) + return; + + /* We may be called from inside intel_context_pin() to evict another */ + intel_context_get(ce); + mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING); + + if (likely(atomic_dec_and_test(&ce->pin_count))) { + GEM_TRACE("%s context:%llx retire\n", + ce->engine->name, ce->timeline->fence_context); + + ce->ops->unpin(ce); + + i915_gem_context_put(ce->gem_context); + intel_context_active_release(ce); + } + + mutex_unlock(&ce->pin_mutex); + intel_context_put(ce); +} + +static int __context_pin_state(struct i915_vma *vma) +{ + u64 flags; + int err; + + flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS; + flags |= PIN_HIGH | PIN_GLOBAL; + + err = i915_vma_pin(vma, 0, 0, flags); + if (err) + return err; + + /* + * And mark it as a globally pinned object to let the shrinker know + * it cannot reclaim the object until we release it. 
+ */ + i915_vma_make_unshrinkable(vma); + vma->obj->mm.dirty = true; + + return 0; +} + +static void __context_unpin_state(struct i915_vma *vma) +{ + i915_vma_make_shrinkable(vma); + __i915_vma_unpin(vma); +} + +__i915_active_call +static void __intel_context_retire(struct i915_active *active) +{ + struct intel_context *ce = container_of(active, typeof(*ce), active); + + GEM_TRACE("%s context:%llx retire\n", + ce->engine->name, ce->timeline->fence_context); + + if (ce->state) + __context_unpin_state(ce->state); + + intel_timeline_unpin(ce->timeline); + intel_ring_unpin(ce->ring); + + intel_context_put(ce); +} + +static int __intel_context_active(struct i915_active *active) +{ + struct intel_context *ce = container_of(active, typeof(*ce), active); + int err; + + intel_context_get(ce); + + err = intel_ring_pin(ce->ring); + if (err) + goto err_put; + + err = intel_timeline_pin(ce->timeline); + if (err) + goto err_ring; + + if (!ce->state) + return 0; + + err = __context_pin_state(ce->state); + if (err) + goto err_timeline; + + return 0; + +err_timeline: + intel_timeline_unpin(ce->timeline); +err_ring: + intel_ring_unpin(ce->ring); +err_put: + intel_context_put(ce); + return err; +} + +int intel_context_active_acquire(struct intel_context *ce) +{ + int err; + + err = i915_active_acquire(&ce->active); + if (err) + return err; + + /* Preallocate tracking nodes */ + if (!i915_gem_context_is_kernel(ce->gem_context)) { + err = i915_active_acquire_preallocate_barrier(&ce->active, + ce->engine); + if (err) { + i915_active_release(&ce->active); + return err; + } + } + + return 0; +} + +void intel_context_active_release(struct intel_context *ce) +{ + /* Nodes preallocated in intel_context_active() */ + i915_active_acquire_barrier(&ce->active); + i915_active_release(&ce->active); +} + +void +intel_context_init(struct intel_context *ce, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct i915_address_space *vm; + + GEM_BUG_ON(!engine->cops); + + kref_init(&ce->ref); + + ce->gem_context = ctx; + rcu_read_lock(); + vm = rcu_dereference(ctx->vm); + if (vm) + ce->vm = i915_vm_get(vm); + else + ce->vm = i915_vm_get(&engine->gt->ggtt->vm); + rcu_read_unlock(); + if (ctx->timeline) + ce->timeline = intel_timeline_get(ctx->timeline); + + ce->engine = engine; + ce->ops = engine->cops; + ce->sseu = engine->sseu; + ce->ring = __intel_context_ring_size(SZ_16K); + + INIT_LIST_HEAD(&ce->signal_link); + INIT_LIST_HEAD(&ce->signals); + + mutex_init(&ce->pin_mutex); + + i915_active_init(&ce->active, + __intel_context_active, __intel_context_retire); +} + +void intel_context_fini(struct intel_context *ce) +{ + if (ce->timeline) + intel_timeline_put(ce->timeline); + i915_vm_put(ce->vm); + + mutex_destroy(&ce->pin_mutex); + i915_active_fini(&ce->active); +} + +static void i915_global_context_shrink(void) +{ + kmem_cache_shrink(global.slab_ce); +} + +static void i915_global_context_exit(void) +{ + kmem_cache_destroy(global.slab_ce); +} + +static struct i915_global_context global = { { + .shrink = i915_global_context_shrink, + .exit = i915_global_context_exit, +} }; + +int __init i915_global_context_init(void) +{ + global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN); + if (!global.slab_ce) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} + +void intel_context_enter_engine(struct intel_context *ce) +{ + intel_engine_pm_get(ce->engine); + intel_timeline_enter(ce->timeline); +} + +void intel_context_exit_engine(struct intel_context *ce) +{ + 
intel_timeline_exit(ce->timeline); + intel_engine_pm_put(ce->engine); +} + +int intel_context_prepare_remote_request(struct intel_context *ce, + struct i915_request *rq) +{ + struct intel_timeline *tl = ce->timeline; + int err; + + /* Only suitable for use in remotely modifying this context */ + GEM_BUG_ON(rq->hw_context == ce); + + if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */ + err = mutex_lock_interruptible_nested(&tl->mutex, + SINGLE_DEPTH_NESTING); + if (err) + return err; + + /* Queue this switch after current activity by this context. */ + err = i915_active_fence_set(&tl->last_request, rq); + mutex_unlock(&tl->mutex); + if (err) + return err; + } + + /* + * Guarantee context image and the timeline remains pinned until the + * modifying request is retired by setting the ce activity tracker. + * + * But we only need to take one pin on the account of it. Or in other + * words transfer the pinned ce object to tracked active request. + */ + GEM_BUG_ON(i915_active_is_idle(&ce->active)); + return i915_active_add_request(&ce->active, rq); +} + +struct i915_request *intel_context_create_request(struct intel_context *ce) +{ + struct i915_request *rq; + int err; + + err = intel_context_pin(ce); + if (unlikely(err)) + return ERR_PTR(err); + + rq = i915_request_create(ce); + intel_context_unpin(ce); + + return rq; +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_context.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h new file mode 100644 index 000000000000..68b3d317d959 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -0,0 +1,156 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_CONTEXT_H__ +#define __INTEL_CONTEXT_H__ + +#include <linux/lockdep.h> + +#include "i915_active.h" +#include "intel_context_types.h" +#include "intel_engine_types.h" +#include "intel_ring_types.h" +#include "intel_timeline_types.h" + +void intel_context_init(struct intel_context *ce, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine); +void intel_context_fini(struct intel_context *ce); + +struct intel_context * +intel_context_create(struct i915_gem_context *ctx, + struct intel_engine_cs *engine); + +void intel_context_free(struct intel_context *ce); + +/** + * intel_context_lock_pinned - Stablises the 'pinned' status of the HW context + * @ce - the context + * + * Acquire a lock on the pinned status of the HW context, such that the context + * can neither be bound to the GPU or unbound whilst the lock is held, i.e. + * intel_context_is_pinned() remains stable. + */ +static inline int intel_context_lock_pinned(struct intel_context *ce) + __acquires(ce->pin_mutex) +{ + return mutex_lock_interruptible(&ce->pin_mutex); +} + +/** + * intel_context_is_pinned - Reports the 'pinned' status + * @ce - the context + * + * While in use by the GPU, the context, along with its ring and page + * tables is pinned into memory and the GTT. + * + * Returns: true if the context is currently pinned for use by the GPU. + */ +static inline bool +intel_context_is_pinned(struct intel_context *ce) +{ + return atomic_read(&ce->pin_count); +} + +/** + * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status + * @ce - the context + * + * Releases the lock earlier acquired by intel_context_unlock_pinned(). 
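+ * (i.e. drops the ce->pin_mutex taken by intel_context_lock_pinned()).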
+ */ +static inline void intel_context_unlock_pinned(struct intel_context *ce) + __releases(ce->pin_mutex) +{ + mutex_unlock(&ce->pin_mutex); +} + +int __intel_context_do_pin(struct intel_context *ce); + +static inline int intel_context_pin(struct intel_context *ce) +{ + if (likely(atomic_inc_not_zero(&ce->pin_count))) + return 0; + + return __intel_context_do_pin(ce); +} + +static inline void __intel_context_pin(struct intel_context *ce) +{ + GEM_BUG_ON(!intel_context_is_pinned(ce)); + atomic_inc(&ce->pin_count); +} + +void intel_context_unpin(struct intel_context *ce); + +void intel_context_enter_engine(struct intel_context *ce); +void intel_context_exit_engine(struct intel_context *ce); + +static inline void intel_context_enter(struct intel_context *ce) +{ + lockdep_assert_held(&ce->timeline->mutex); + if (!ce->active_count++) + ce->ops->enter(ce); +} + +static inline void intel_context_mark_active(struct intel_context *ce) +{ + lockdep_assert_held(&ce->timeline->mutex); + ++ce->active_count; +} + +static inline void intel_context_exit(struct intel_context *ce) +{ + lockdep_assert_held(&ce->timeline->mutex); + GEM_BUG_ON(!ce->active_count); + if (!--ce->active_count) + ce->ops->exit(ce); +} + +int intel_context_active_acquire(struct intel_context *ce); +void intel_context_active_release(struct intel_context *ce); + +static inline struct intel_context *intel_context_get(struct intel_context *ce) +{ + kref_get(&ce->ref); + return ce; +} + +static inline void intel_context_put(struct intel_context *ce) +{ + kref_put(&ce->ref, ce->ops->destroy); +} + +static inline struct intel_timeline *__must_check +intel_context_timeline_lock(struct intel_context *ce) + __acquires(&ce->timeline->mutex) +{ + struct intel_timeline *tl = ce->timeline; + int err; + + err = mutex_lock_interruptible(&tl->mutex); + if (err) + return ERR_PTR(err); + + return tl; +} + +static inline void intel_context_timeline_unlock(struct intel_timeline *tl) + __releases(&tl->mutex) +{ + mutex_unlock(&tl->mutex); +} + +int intel_context_prepare_remote_request(struct intel_context *ce, + struct i915_request *rq); + +struct i915_request *intel_context_create_request(struct intel_context *ce); + +static inline struct intel_ring *__intel_context_ring_size(u64 sz) +{ + return u64_to_ptr(struct intel_ring, sz); +} + +#endif /* __INTEL_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h new file mode 100644 index 000000000000..6959b05ae5f8 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_CONTEXT_TYPES__ +#define __INTEL_CONTEXT_TYPES__ + +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/types.h> + +#include "i915_active_types.h" +#include "i915_utils.h" +#include "intel_engine_types.h" +#include "intel_sseu.h" + +struct i915_gem_context; +struct i915_vma; +struct intel_context; +struct intel_ring; + +struct intel_context_ops { + int (*alloc)(struct intel_context *ce); + + int (*pin)(struct intel_context *ce); + void (*unpin)(struct intel_context *ce); + + void (*enter)(struct intel_context *ce); + void (*exit)(struct intel_context *ce); + + void (*reset)(struct intel_context *ce); + void (*destroy)(struct kref *kref); +}; + +struct intel_context { + struct kref ref; + + struct intel_engine_cs *engine; + struct intel_engine_cs *inflight; +#define intel_context_inflight(ce) 
ptr_mask_bits((ce)->inflight, 2) +#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2) + + struct i915_address_space *vm; + struct i915_gem_context *gem_context; + + struct list_head signal_link; + struct list_head signals; + + struct i915_vma *state; + struct intel_ring *ring; + struct intel_timeline *timeline; + + unsigned long flags; +#define CONTEXT_ALLOC_BIT 0 + + u32 *lrc_reg_state; + u64 lrc_desc; + u32 tag; /* cookie passed to HW to track this context on submission */ + + unsigned int active_count; /* protected by timeline->mutex */ + + atomic_t pin_count; + struct mutex pin_mutex; /* guards pinning and associated on-gpuing */ + + /** + * active: Active tracker for the rq activity (inc. external) on this + * intel_context object. + */ + struct i915_active active; + + const struct intel_context_ops *ops; + + /** sseu: Control eu/slice partitioning */ + struct intel_sseu sseu; +}; + +#endif /* __INTEL_CONTEXT_TYPES__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h new file mode 100644 index 000000000000..bc3b72bfa9e3 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -0,0 +1,347 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef _INTEL_RINGBUFFER_H_ +#define _INTEL_RINGBUFFER_H_ + +#include <drm/drm_util.h> + +#include <linux/hashtable.h> +#include <linux/irq_work.h> +#include <linux/random.h> +#include <linux/seqlock.h> + +#include "i915_pmu.h" +#include "i915_reg.h" +#include "i915_request.h" +#include "i915_selftest.h" +#include "gt/intel_timeline.h" +#include "intel_engine_types.h" +#include "intel_gpu_commands.h" +#include "intel_workarounds.h" + +struct drm_printer; +struct intel_gt; + +/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill, + * but keeps the logic simple. Indeed, the whole purpose of this macro is just + * to give some inclination as to some of the magic values used in the various + * workarounds! + */ +#define CACHELINE_BYTES 64 +#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32)) + +/* + * The register defines to be used with the following macros need to accept a + * base param, e.g: + * + * REG_FOO(base) _MMIO((base) + <relative offset>) + * ENGINE_READ(engine, REG_FOO); + * + * register arrays are to be defined and accessed as follows: + * + * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>) + * ENGINE_READ_IDX(engine, REG_BAR, i) + */ + +#define __ENGINE_REG_OP(op__, engine__, ...) \ + intel_uncore_##op__((engine__)->uncore, __VA_ARGS__) + +#define __ENGINE_READ_OP(op__, engine__, reg__) \ + __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base)) + +#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__) +#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__) +#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__) +#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read_fw, __VA_ARGS__) +#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__) + +#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \ + __ENGINE_REG_OP(read64_2x32, (engine__), \ + lower_reg__((engine__)->mmio_base), \ + upper_reg__((engine__)->mmio_base)) + +#define ENGINE_READ_IDX(engine__, reg__, idx__) \ + __ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__))) + +#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \ + __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__)) + +#define ENGINE_WRITE16(...) 
__ENGINE_WRITE_OP(write16, __VA_ARGS__) +#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__) +#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__) + +#define GEN6_RING_FAULT_REG_READ(engine__) \ + intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__)) + +#define GEN6_RING_FAULT_REG_POSTING_READ(engine__) \ + intel_uncore_posting_read((engine__)->uncore, RING_FAULT_REG(engine__)) + +#define GEN6_RING_FAULT_REG_RMW(engine__, clear__, set__) \ +({ \ + u32 __val; \ +\ + __val = intel_uncore_read((engine__)->uncore, \ + RING_FAULT_REG(engine__)); \ + __val &= ~(clear__); \ + __val |= (set__); \ + intel_uncore_write((engine__)->uncore, RING_FAULT_REG(engine__), \ + __val); \ +}) + +/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to + * do the writes, and that must have qw aligned offsets, simply pretend it's 8b. + */ + +static inline unsigned int +execlists_num_ports(const struct intel_engine_execlists * const execlists) +{ + return execlists->port_mask + 1; +} + +static inline struct i915_request * +execlists_active(const struct intel_engine_execlists *execlists) +{ + GEM_BUG_ON(execlists->active - execlists->inflight > + execlists_num_ports(execlists)); + return READ_ONCE(*execlists->active); +} + +static inline void +execlists_active_lock_bh(struct intel_engine_execlists *execlists) +{ + local_bh_disable(); /* prevent local softirq and lock recursion */ + tasklet_lock(&execlists->tasklet); +} + +static inline void +execlists_active_unlock_bh(struct intel_engine_execlists *execlists) +{ + tasklet_unlock(&execlists->tasklet); + local_bh_enable(); /* restore softirq, and kick ksoftirqd! */ +} + +struct i915_request * +execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists); + +static inline u32 +intel_read_status_page(const struct intel_engine_cs *engine, int reg) +{ + /* Ensure that the compiler doesn't optimize away the load. */ + return READ_ONCE(engine->status_page.addr[reg]); +} + +static inline void +intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) +{ + /* Writing into the status page should be done sparingly. Since + * we do when we are uncertain of the device state, we take a bit + * of extra paranoia to try and ensure that the HWS takes the value + * we give and that it doesn't end up trapped inside the CPU! + */ + if (static_cpu_has(X86_FEATURE_CLFLUSH)) { + mb(); + clflush(&engine->status_page.addr[reg]); + engine->status_page.addr[reg] = value; + clflush(&engine->status_page.addr[reg]); + mb(); + } else { + WRITE_ONCE(engine->status_page.addr[reg], value); + } +} + +/* + * Reads a dword out of the status page, which is written to from the command + * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or + * MI_STORE_DATA_IMM. + * + * The following dwords have a reserved meaning: + * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. + * 0x04: ring 0 head pointer + * 0x05: ring 1 head pointer (915-class) + * 0x06: ring 2 head pointer (915-class) + * 0x10-0x1b: Context status DWords (GM45) + * 0x1f: Last written status offset. (GM45) + * 0x20-0x2f: Reserved (Gen6+) + * + * The area from dword 0x30 to 0x3ff is available for driver usage. 
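+ * The I915_GEM_HWS_* offsets below are dword indices into that area; the
+ * byte address is simply the index multiplied by sizeof(u32). A new slot
+ * would follow the same pattern, e.g. (hypothetical name):
+ *	#define I915_GEM_HWS_FOO	0x60
+ *	#define I915_GEM_HWS_FOO_ADDR	(I915_GEM_HWS_FOO * sizeof(u32))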
+ */ +#define I915_GEM_HWS_PREEMPT 0x32 +#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT * sizeof(u32)) +#define I915_GEM_HWS_SEQNO 0x40 +#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32)) +#define I915_GEM_HWS_SCRATCH 0x80 +#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH * sizeof(u32)) + +#define I915_HWS_CSB_BUF0_INDEX 0x10 +#define I915_HWS_CSB_WRITE_INDEX 0x1f +#define CNL_HWS_CSB_WRITE_INDEX 0x2f + +void intel_engine_stop(struct intel_engine_cs *engine); +void intel_engine_cleanup(struct intel_engine_cs *engine); + +int intel_engines_init_mmio(struct intel_gt *gt); +int intel_engines_setup(struct intel_gt *gt); +int intel_engines_init(struct intel_gt *gt); +void intel_engines_cleanup(struct intel_gt *gt); + +int intel_engine_init_common(struct intel_engine_cs *engine); +void intel_engine_cleanup_common(struct intel_engine_cs *engine); + +int intel_ring_submission_setup(struct intel_engine_cs *engine); +int intel_ring_submission_init(struct intel_engine_cs *engine); + +int intel_engine_stop_cs(struct intel_engine_cs *engine); +void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine); + +void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask); + +u64 intel_engine_get_active_head(const struct intel_engine_cs *engine); +u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine); + +void intel_engine_get_instdone(struct intel_engine_cs *engine, + struct intel_instdone *instdone); + +void intel_engine_init_execlists(struct intel_engine_cs *engine); + +void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); +void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); + +void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine); + +static inline void +intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine) +{ + irq_work_queue(&engine->breadcrumbs.irq_work); +} + +void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine); + +void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine); +void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); + +void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, + struct drm_printer *p); + +static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset) +{ + memset(batch, 0, 6 * sizeof(u32)); + + batch[0] = GFX_OP_PIPE_CONTROL(6); + batch[1] = flags; + batch[2] = offset; + + return batch + 6; +} + +static inline u32 * +gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags) +{ + /* We're using qword write, offset should be aligned to 8 bytes. */ + GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); + + /* w/a for post sync ops following a GPGPU operation we + * need a prior CS_STALL, which is emitted by the flush + * following the batch. + */ + *cs++ = GFX_OP_PIPE_CONTROL(6); + *cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB; + *cs++ = gtt_offset; + *cs++ = 0; + *cs++ = value; + /* We're thrashing one dword of HWS. */ + *cs++ = 0; + + return cs; +} + +static inline u32 * +gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags) +{ + /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. 
*/ + GEM_BUG_ON(gtt_offset & (1 << 5)); + /* Offset should be aligned to 8 bytes for both (QW/DW) write types */ + GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); + + *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags; + *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT; + *cs++ = 0; + *cs++ = value; + + return cs; +} + +static inline void __intel_engine_reset(struct intel_engine_cs *engine, + bool stalled) +{ + if (engine->reset.reset) + engine->reset.reset(engine, stalled); + engine->serial++; /* contexts lost */ +} + +bool intel_engines_are_idle(struct intel_gt *gt); +bool intel_engine_is_idle(struct intel_engine_cs *engine); +void intel_engine_flush_submission(struct intel_engine_cs *engine); + +void intel_engines_reset_default_submission(struct intel_gt *gt); + +bool intel_engine_can_store_dword(struct intel_engine_cs *engine); + +__printf(3, 4) +void intel_engine_dump(struct intel_engine_cs *engine, + struct drm_printer *m, + const char *header, ...); + +int intel_enable_engine_stats(struct intel_engine_cs *engine); +void intel_disable_engine_stats(struct intel_engine_cs *engine); + +ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine); + +struct i915_request * +intel_engine_find_active_request(struct intel_engine_cs *engine); + +u32 intel_engine_context_size(struct drm_i915_private *i915, u8 class); + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) + +static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) +{ + if (!execlists->preempt_hang.inject_hang) + return false; + + complete(&execlists->preempt_hang.completion); + return true; +} + +#else + +static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) +{ + return false; +} + +#endif + +void intel_engine_init_active(struct intel_engine_cs *engine, + unsigned int subclass); +#define ENGINE_PHYSICAL 0 +#define ENGINE_MOCK 1 +#define ENGINE_VIRTUAL 2 + +static inline bool +intel_engine_has_preempt_reset(const struct intel_engine_cs *engine) +{ + if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + return false; + + return intel_engine_has_preemption(engine); +} + +static inline bool +intel_engine_has_timeslices(const struct intel_engine_cs *engine) +{ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return false; + + return intel_engine_has_semaphores(engine); +} + +#endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c new file mode 100644 index 000000000000..5ca3ec911e50 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -0,0 +1,1698 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include <drm/drm_print.h> + +#include "gem/i915_gem_context.h" + +#include "i915_drv.h" + +#include "gt/intel_gt.h" + +#include "intel_engine.h" +#include "intel_engine_pm.h" +#include "intel_engine_pool.h" +#include "intel_engine_user.h" +#include "intel_context.h" +#include "intel_lrc.h" +#include "intel_reset.h" +#include "intel_ring.h" + +/* Haswell does have the CXT_SIZE register however it does not appear to be + * valid. Now, docs explain in dwords what is in the context object. The full + * size is 70720 bytes, however, the power context and execlist context will + * never be saved (power context is stored elsewhere, and execlists don't work + * on HSW) - so the final size, including the extra state required for the + * Resource Streamer, is 66944 bytes, which rounds to 17 pages. + */ +#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) + +#define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) +#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE) +#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) +#define GEN10_LR_CONTEXT_RENDER_SIZE (18 * PAGE_SIZE) +#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE) + +#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE) + +#define MAX_MMIO_BASES 3 +struct engine_info { + unsigned int hw_id; + u8 class; + u8 instance; + /* mmio bases table *must* be sorted in reverse gen order */ + struct engine_mmio_base { + u32 gen : 8; + u32 base : 24; + } mmio_bases[MAX_MMIO_BASES]; +}; + +static const struct engine_info intel_engines[] = { + [RCS0] = { + .hw_id = RCS0_HW, + .class = RENDER_CLASS, + .instance = 0, + .mmio_bases = { + { .gen = 1, .base = RENDER_RING_BASE } + }, + }, + [BCS0] = { + .hw_id = BCS0_HW, + .class = COPY_ENGINE_CLASS, + .instance = 0, + .mmio_bases = { + { .gen = 6, .base = BLT_RING_BASE } + }, + }, + [VCS0] = { + .hw_id = VCS0_HW, + .class = VIDEO_DECODE_CLASS, + .instance = 0, + .mmio_bases = { + { .gen = 11, .base = GEN11_BSD_RING_BASE }, + { .gen = 6, .base = GEN6_BSD_RING_BASE }, + { .gen = 4, .base = BSD_RING_BASE } + }, + }, + [VCS1] = { + .hw_id = VCS1_HW, + .class = VIDEO_DECODE_CLASS, + .instance = 1, + .mmio_bases = { + { .gen = 11, .base = GEN11_BSD2_RING_BASE }, + { .gen = 8, .base = GEN8_BSD2_RING_BASE } + }, + }, + [VCS2] = { + .hw_id = VCS2_HW, + .class = VIDEO_DECODE_CLASS, + .instance = 2, + .mmio_bases = { + { .gen = 11, .base = GEN11_BSD3_RING_BASE } + }, + }, + [VCS3] = { + .hw_id = VCS3_HW, + .class = VIDEO_DECODE_CLASS, + .instance = 3, + .mmio_bases = { + { .gen = 11, .base = GEN11_BSD4_RING_BASE } + }, + }, + [VECS0] = { + .hw_id = VECS0_HW, + .class = VIDEO_ENHANCEMENT_CLASS, + .instance = 0, + .mmio_bases = { + { .gen = 11, .base = GEN11_VEBOX_RING_BASE }, + { .gen = 7, .base = VEBOX_RING_BASE } + }, + }, + [VECS1] = { + .hw_id = VECS1_HW, + .class = VIDEO_ENHANCEMENT_CLASS, + .instance = 1, + .mmio_bases = { + { .gen = 11, .base = GEN11_VEBOX2_RING_BASE } + }, + }, +}; + +/** + * intel_engine_context_size() - return the size of the context for an engine + * @dev_priv: i915 device private + * @class: engine class + * + * Each engine class may require a different amount of space for a context + * image. 
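+ * For example, with 4KiB pages a Gen9 render context image occupies
+ * GEN9_LR_CONTEXT_RENDER_SIZE (22 pages, i.e. 88KiB), while the non-render
+ * classes on Gen8+ need only GEN8_LR_CONTEXT_OTHER_SIZE (2 pages).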
+ * + * Return: size (in bytes) of an engine class specific context image + * + * Note: this size includes the HWSP, which is part of the context image + * in LRC mode, but does not include the "shared data page" used with + * GuC submission. The caller should account for this if using the GuC. + */ +u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) +{ + u32 cxt_size; + + BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE); + + switch (class) { + case RENDER_CLASS: + switch (INTEL_GEN(dev_priv)) { + default: + MISSING_CASE(INTEL_GEN(dev_priv)); + return DEFAULT_LR_CONTEXT_RENDER_SIZE; + case 12: + case 11: + return GEN11_LR_CONTEXT_RENDER_SIZE; + case 10: + return GEN10_LR_CONTEXT_RENDER_SIZE; + case 9: + return GEN9_LR_CONTEXT_RENDER_SIZE; + case 8: + return GEN8_LR_CONTEXT_RENDER_SIZE; + case 7: + if (IS_HASWELL(dev_priv)) + return HSW_CXT_TOTAL_SIZE; + + cxt_size = I915_READ(GEN7_CXT_SIZE); + return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64, + PAGE_SIZE); + case 6: + cxt_size = I915_READ(CXT_SIZE); + return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64, + PAGE_SIZE); + case 5: + case 4: + /* + * There is a discrepancy here between the size reported + * by the register and the size of the context layout + * in the docs. Both are described as authorative! + * + * The discrepancy is on the order of a few cachelines, + * but the total is under one page (4k), which is our + * minimum allocation anyway so it should all come + * out in the wash. + */ + cxt_size = I915_READ(CXT_SIZE) + 1; + DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n", + INTEL_GEN(dev_priv), + cxt_size * 64, + cxt_size - 1); + return round_up(cxt_size * 64, PAGE_SIZE); + case 3: + case 2: + /* For the special day when i810 gets merged. */ + case 1: + return 0; + } + break; + default: + MISSING_CASE(class); + /* fall through */ + case VIDEO_DECODE_CLASS: + case VIDEO_ENHANCEMENT_CLASS: + case COPY_ENGINE_CLASS: + if (INTEL_GEN(dev_priv) < 8) + return 0; + return GEN8_LR_CONTEXT_OTHER_SIZE; + } +} + +static u32 __engine_mmio_base(struct drm_i915_private *i915, + const struct engine_mmio_base *bases) +{ + int i; + + for (i = 0; i < MAX_MMIO_BASES; i++) + if (INTEL_GEN(i915) >= bases[i].gen) + break; + + GEM_BUG_ON(i == MAX_MMIO_BASES); + GEM_BUG_ON(!bases[i].base); + + return bases[i].base; +} + +static void __sprint_engine_name(struct intel_engine_cs *engine) +{ + /* + * Before we know what the uABI name for this engine will be, + * we still would like to keep track of this engine in the debug logs. + * We throw in a ' here as a reminder that this isn't its final name. + */ + GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u", + intel_engine_class_repr(engine->class), + engine->instance) >= sizeof(engine->name)); +} + +void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask) +{ + /* + * Though they added more rings on g4x/ilk, they did not add + * per-engine HWSTAM until gen6. 
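+	 * On those older platforms only the render engine has a writemask to
+	 * program, hence the early return below for every other engine.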
+ */ + if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS) + return; + + if (INTEL_GEN(engine->i915) >= 3) + ENGINE_WRITE(engine, RING_HWSTAM, mask); + else + ENGINE_WRITE16(engine, RING_HWSTAM, mask); +} + +static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine) +{ + /* Mask off all writes into the unknown HWSP */ + intel_engine_set_hwsp_writemask(engine, ~0u); +} + +static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) +{ + const struct engine_info *info = &intel_engines[id]; + struct intel_engine_cs *engine; + + BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH)); + BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH)); + + if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine))) + return -EINVAL; + + if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS)) + return -EINVAL; + + if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE)) + return -EINVAL; + + if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance])) + return -EINVAL; + + engine = kzalloc(sizeof(*engine), GFP_KERNEL); + if (!engine) + return -ENOMEM; + + BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES); + + engine->id = id; + engine->legacy_idx = INVALID_ENGINE; + engine->mask = BIT(id); + engine->i915 = gt->i915; + engine->gt = gt; + engine->uncore = gt->uncore; + engine->hw_id = engine->guc_id = info->hw_id; + engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases); + + engine->class = info->class; + engine->instance = info->instance; + __sprint_engine_name(engine); + + engine->props.heartbeat_interval_ms = + CONFIG_DRM_I915_HEARTBEAT_INTERVAL; + engine->props.preempt_timeout_ms = + CONFIG_DRM_I915_PREEMPT_TIMEOUT; + engine->props.stop_timeout_ms = + CONFIG_DRM_I915_STOP_TIMEOUT; + engine->props.timeslice_duration_ms = + CONFIG_DRM_I915_TIMESLICE_DURATION; + + /* + * To be overridden by the backend on setup. However to facilitate + * cleanup on error during setup, we always provide the destroy vfunc. + */ + engine->destroy = (typeof(engine->destroy))kfree; + + engine->context_size = intel_engine_context_size(gt->i915, + engine->class); + if (WARN_ON(engine->context_size > BIT(20))) + engine->context_size = 0; + if (engine->context_size) + DRIVER_CAPS(gt->i915)->has_logical_contexts = true; + + /* Nothing to do here, execute in order of dependencies */ + engine->schedule = NULL; + + seqlock_init(&engine->stats.lock); + + ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); + + /* Scrub mmio state on takeover */ + intel_engine_sanitize_mmio(engine); + + gt->engine_class[info->class][info->instance] = engine; + gt->engine[id] = engine; + + intel_engine_add_user(engine); + gt->i915->engine[id] = engine; + + return 0; +} + +static void __setup_engine_capabilities(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + + if (engine->class == VIDEO_DECODE_CLASS) { + /* + * HEVC support is present on first engine instance + * before Gen11 and on all instances afterwards. + */ + if (INTEL_GEN(i915) >= 11 || + (INTEL_GEN(i915) >= 9 && engine->instance == 0)) + engine->uabi_capabilities |= + I915_VIDEO_CLASS_CAPABILITY_HEVC; + + /* + * SFC block is present only on even logical engine + * instances. 
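+		 * On Gen11+ the set of SFC-capable video engines is taken from
+		 * vdbox_sfc_access and checked against engine->mask below.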
+ */ + if ((INTEL_GEN(i915) >= 11 && + RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) || + (INTEL_GEN(i915) >= 9 && engine->instance == 0)) + engine->uabi_capabilities |= + I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; + } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) { + if (INTEL_GEN(i915) >= 9) + engine->uabi_capabilities |= + I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; + } +} + +static void intel_setup_engine_capabilities(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) + __setup_engine_capabilities(engine); +} + +/** + * intel_engines_cleanup() - free the resources allocated for Command Streamers + * @gt: pointer to struct intel_gt + */ +void intel_engines_cleanup(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) { + engine->destroy(engine); + gt->engine[id] = NULL; + gt->i915->engine[id] = NULL; + } +} + +/** + * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers + * @gt: pointer to struct intel_gt + * + * Return: non-zero if the initialization failed. + */ +int intel_engines_init_mmio(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_device_info *device_info = mkwrite_device_info(i915); + const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask; + unsigned int mask = 0; + unsigned int i; + int err; + + WARN_ON(engine_mask == 0); + WARN_ON(engine_mask & + GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES)); + + if (i915_inject_probe_failure(i915)) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { + if (!HAS_ENGINE(i915, i)) + continue; + + err = intel_engine_setup(gt, i); + if (err) + goto cleanup; + + mask |= BIT(i); + } + + /* + * Catch failures to update intel_engines table when the new engines + * are added to the driver by a warning and disabling the forgotten + * engines. + */ + if (WARN_ON(mask != engine_mask)) + device_info->engine_mask = mask; + + RUNTIME_INFO(i915)->num_engines = hweight32(mask); + + intel_gt_check_and_clear_faults(gt); + + intel_setup_engine_capabilities(gt); + + return 0; + +cleanup: + intel_engines_cleanup(gt); + return err; +} + +/** + * intel_engines_init() - init the Engine Command Streamers + * @gt: pointer to struct intel_gt + * + * Return: non-zero if the initialization failed. 
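+ * On failure, any engines already set up are released again via
+ * intel_engines_cleanup().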
+ */ +int intel_engines_init(struct intel_gt *gt) +{ + int (*init)(struct intel_engine_cs *engine); + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err; + + if (HAS_EXECLISTS(gt->i915)) + init = intel_execlists_submission_init; + else + init = intel_ring_submission_init; + + for_each_engine(engine, gt, id) { + err = init(engine); + if (err) + goto cleanup; + } + + return 0; + +cleanup: + intel_engines_cleanup(gt); + return err; +} + +void intel_engine_init_execlists(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + execlists->port_mask = 1; + GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists))); + GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); + + memset(execlists->pending, 0, sizeof(execlists->pending)); + execlists->active = + memset(execlists->inflight, 0, sizeof(execlists->inflight)); + + execlists->queue_priority_hint = INT_MIN; + execlists->queue = RB_ROOT_CACHED; +} + +static void cleanup_status_page(struct intel_engine_cs *engine) +{ + struct i915_vma *vma; + + /* Prevent writes into HWSP after returning the page to the system */ + intel_engine_set_hwsp_writemask(engine, ~0u); + + vma = fetch_and_zero(&engine->status_page.vma); + if (!vma) + return; + + if (!HWS_NEEDS_PHYSICAL(engine->i915)) + i915_vma_unpin(vma); + + i915_gem_object_unpin_map(vma->obj); + i915_gem_object_put(vma->obj); +} + +static int pin_ggtt_status_page(struct intel_engine_cs *engine, + struct i915_vma *vma) +{ + unsigned int flags; + + flags = PIN_GLOBAL; + if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt)) + /* + * On g33, we cannot place HWS above 256MiB, so + * restrict its pinning to the low mappable arena. + * Though this restriction is not documented for + * gen4, gen5, or byt, they also behave similarly + * and hang if the HWS is placed at the top of the + * GTT. To generalise, it appears that all !llc + * platforms have issues with us placing the HWS + * above the mappable region (even though we never + * actually map it). + */ + flags |= PIN_MAPPABLE; + else + flags |= PIN_HIGH; + + return i915_vma_pin(vma, 0, 0, flags); +} + +static int init_status_page(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + void *vaddr; + int ret; + + /* + * Though the HWS register does support 36bit addresses, historically + * we have had hangs and corruption reported due to wild writes if + * the HWS is placed above 4G. We only allow objects to be allocated + * in GFP_DMA32 for i965, and no earlier physical address users had + * access to more than 4G. 
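+	 * The page itself is allocated below from an internal object and,
+	 * unless the platform requires a physical HWS, pinned into the GGTT
+	 * by pin_ggtt_status_page().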
+ */ + obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate status page\n"); + return PTR_ERR(obj); + } + + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); + + vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err; + } + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto err; + } + + engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE); + engine->status_page.vma = vma; + + if (!HWS_NEEDS_PHYSICAL(engine->i915)) { + ret = pin_ggtt_status_page(engine, vma); + if (ret) + goto err_unpin; + } + + return 0; + +err_unpin: + i915_gem_object_unpin_map(obj); +err: + i915_gem_object_put(obj); + return ret; +} + +static int intel_engine_setup_common(struct intel_engine_cs *engine) +{ + int err; + + init_llist_head(&engine->barrier_tasks); + + err = init_status_page(engine); + if (err) + return err; + + intel_engine_init_active(engine, ENGINE_PHYSICAL); + intel_engine_init_breadcrumbs(engine); + intel_engine_init_execlists(engine); + intel_engine_init_cmd_parser(engine); + intel_engine_init__pm(engine); + + intel_engine_pool_init(&engine->pool); + + /* Use the whole device by default */ + engine->sseu = + intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu); + + intel_engine_init_workarounds(engine); + intel_engine_init_whitelist(engine); + intel_engine_init_ctx_wa(engine); + + return 0; +} + +/** + * intel_engines_setup- setup engine state not requiring hw access + * @gt: pointer to struct intel_gt + * + * Initializes engine structure members shared between legacy and execlists + * submission modes which do not require hardware access. + * + * Typically done early in the submission mode specific engine setup stage. 
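+ * The hardware-dependent half of the initialization is completed later by
+ * intel_engines_init().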
+ */ +int intel_engines_setup(struct intel_gt *gt) +{ + int (*setup)(struct intel_engine_cs *engine); + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err; + + if (HAS_EXECLISTS(gt->i915)) + setup = intel_execlists_submission_setup; + else + setup = intel_ring_submission_setup; + + for_each_engine(engine, gt, id) { + err = intel_engine_setup_common(engine); + if (err) + goto cleanup; + + err = setup(engine); + if (err) + goto cleanup; + + /* We expect the backend to take control over its state */ + GEM_BUG_ON(engine->destroy == (typeof(engine->destroy))kfree); + + GEM_BUG_ON(!engine->cops); + } + + return 0; + +cleanup: + intel_engines_cleanup(gt); + return err; +} + +struct measure_breadcrumb { + struct i915_request rq; + struct intel_timeline timeline; + struct intel_ring ring; + u32 cs[1024]; +}; + +static int measure_breadcrumb_dw(struct intel_engine_cs *engine) +{ + struct measure_breadcrumb *frame; + int dw = -ENOMEM; + + GEM_BUG_ON(!engine->gt->scratch); + + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) + return -ENOMEM; + + if (intel_timeline_init(&frame->timeline, + engine->gt, + engine->status_page.vma)) + goto out_frame; + + mutex_lock(&frame->timeline.mutex); + + frame->ring.vaddr = frame->cs; + frame->ring.size = sizeof(frame->cs); + frame->ring.effective_size = frame->ring.size; + intel_ring_update_space(&frame->ring); + + frame->rq.i915 = engine->i915; + frame->rq.engine = engine; + frame->rq.ring = &frame->ring; + rcu_assign_pointer(frame->rq.timeline, &frame->timeline); + + dw = intel_timeline_pin(&frame->timeline); + if (dw < 0) + goto out_timeline; + + spin_lock_irq(&engine->active.lock); + dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs; + spin_unlock_irq(&engine->active.lock); + + GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */ + + intel_timeline_unpin(&frame->timeline); + +out_timeline: + mutex_unlock(&frame->timeline.mutex); + intel_timeline_fini(&frame->timeline); +out_frame: + kfree(frame); + return dw; +} + +void +intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass) +{ + INIT_LIST_HEAD(&engine->active.requests); + + spin_lock_init(&engine->active.lock); + lockdep_set_subclass(&engine->active.lock, subclass); + + /* + * Due to an interesting quirk in lockdep's internal debug tracking, + * after setting a subclass we must ensure the lock is used. Otherwise, + * nr_unused_locks is incremented once too often. + */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + local_irq_disable(); + lock_map_acquire(&engine->active.lock.dep_map); + lock_map_release(&engine->active.lock.dep_map); + local_irq_enable(); +#endif +} + +static struct intel_context * +create_kernel_context(struct intel_engine_cs *engine) +{ + static struct lock_class_key kernel; + struct intel_context *ce; + int err; + + ce = intel_context_create(engine->i915->kernel_context, engine); + if (IS_ERR(ce)) + return ce; + + ce->ring = __intel_context_ring_size(SZ_4K); + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + return ERR_PTR(err); + } + + /* + * Give our perma-pinned kernel timelines a separate lockdep class, + * so that we can use them from within the normal user timelines + * should we need to inject GPU operations during their request + * construction. + */ + lockdep_set_class(&ce->timeline->mutex, &kernel); + + return ce; +} + +/** + * intel_engines_init_common - initialize cengine state which might require hw access + * @engine: Engine to initialize. 
+ * + * Initializes @engine@ structure members shared between legacy and execlists + * submission modes which do require hardware access. + * + * Typcally done at later stages of submission mode specific engine setup. + * + * Returns zero on success or an error code on failure. + */ +int intel_engine_init_common(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + int ret; + + engine->set_default_submission(engine); + + /* + * We may need to do things with the shrinker which + * require us to immediately switch back to the default + * context. This can cause a problem as pinning the + * default context also requires GTT space which may not + * be available. To avoid this we always pin the default + * context. + */ + ce = create_kernel_context(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + engine->kernel_context = ce; + + ret = measure_breadcrumb_dw(engine); + if (ret < 0) + goto err_unpin; + + engine->emit_fini_breadcrumb_dw = ret; + + return 0; + +err_unpin: + intel_context_unpin(ce); + intel_context_put(ce); + return ret; +} + +/** + * intel_engines_cleanup_common - cleans up the engine state created by + * the common initiailizers. + * @engine: Engine to cleanup. + * + * This cleans up everything created by the common helpers. + */ +void intel_engine_cleanup_common(struct intel_engine_cs *engine) +{ + GEM_BUG_ON(!list_empty(&engine->active.requests)); + + cleanup_status_page(engine); + + intel_engine_pool_fini(&engine->pool); + intel_engine_fini_breadcrumbs(engine); + intel_engine_cleanup_cmd_parser(engine); + + if (engine->default_state) + i915_gem_object_put(engine->default_state); + + if (engine->kernel_context) { + intel_context_unpin(engine->kernel_context); + intel_context_put(engine->kernel_context); + } + GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); + + intel_wa_list_free(&engine->ctx_wa_list); + intel_wa_list_free(&engine->wa_list); + intel_wa_list_free(&engine->whitelist); +} + +u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + + u64 acthd; + + if (INTEL_GEN(i915) >= 8) + acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW); + else if (INTEL_GEN(i915) >= 4) + acthd = ENGINE_READ(engine, RING_ACTHD); + else + acthd = ENGINE_READ(engine, ACTHD); + + return acthd; +} + +u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine) +{ + u64 bbaddr; + + if (INTEL_GEN(engine->i915) >= 8) + bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW); + else + bbaddr = ENGINE_READ(engine, RING_BBADDR); + + return bbaddr; +} + +static unsigned long stop_timeout(const struct intel_engine_cs *engine) +{ + if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */ + return 0; + + /* + * If we are doing a normal GPU reset, we can take our time and allow + * the engine to quiesce. We've stopped submission to the engine, and + * if we wait long enough an innocent context should complete and + * leave the engine idle. So they should not be caught unaware by + * the forthcoming GPU reset (which usually follows the stop_cs)! 
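+	 * The value is in milliseconds and comes from the per-engine
+	 * properties (initialized from CONFIG_DRM_I915_STOP_TIMEOUT), hence
+	 * the READ_ONCE below.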
+ */ + return READ_ONCE(engine->props.stop_timeout_ms); +} + +int intel_engine_stop_cs(struct intel_engine_cs *engine) +{ + struct intel_uncore *uncore = engine->uncore; + const u32 base = engine->mmio_base; + const i915_reg_t mode = RING_MI_MODE(base); + int err; + + if (INTEL_GEN(engine->i915) < 3) + return -ENODEV; + + GEM_TRACE("%s\n", engine->name); + + intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING)); + + err = 0; + if (__intel_wait_for_register_fw(uncore, + mode, MODE_IDLE, MODE_IDLE, + 1000, stop_timeout(engine), + NULL)) { + GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name); + err = -ETIMEDOUT; + } + + /* A final mmio read to let GPU writes be hopefully flushed to memory */ + intel_uncore_posting_read_fw(uncore, mode); + + return err; +} + +void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine) +{ + GEM_TRACE("%s\n", engine->name); + + ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); +} + +const char *i915_cache_level_str(struct drm_i915_private *i915, int type) +{ + switch (type) { + case I915_CACHE_NONE: return " uncached"; + case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped"; + case I915_CACHE_L3_LLC: return " L3+LLC"; + case I915_CACHE_WT: return " WT"; + default: return ""; + } +} + +static u32 +read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice, + i915_reg_t reg) +{ + struct drm_i915_private *i915 = engine->i915; + struct intel_uncore *uncore = engine->uncore; + u32 mcr_mask, mcr_ss, mcr, old_mcr, val; + enum forcewake_domains fw_domains; + + if (INTEL_GEN(i915) >= 11) { + mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK; + mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice); + } else { + mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK; + mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice); + } + + fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, + FW_REG_READ); + fw_domains |= intel_uncore_forcewake_for_reg(uncore, + GEN8_MCR_SELECTOR, + FW_REG_READ | FW_REG_WRITE); + + spin_lock_irq(&uncore->lock); + intel_uncore_forcewake_get__locked(uncore, fw_domains); + + old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR); + + mcr &= ~mcr_mask; + mcr |= mcr_ss; + intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr); + + val = intel_uncore_read_fw(uncore, reg); + + mcr &= ~mcr_mask; + mcr |= old_mcr & mcr_mask; + + intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr); + + intel_uncore_forcewake_put__locked(uncore, fw_domains); + spin_unlock_irq(&uncore->lock); + + return val; +} + +/* NB: please notice the memset */ +void intel_engine_get_instdone(struct intel_engine_cs *engine, + struct intel_instdone *instdone) +{ + struct drm_i915_private *i915 = engine->i915; + const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; + struct intel_uncore *uncore = engine->uncore; + u32 mmio_base = engine->mmio_base; + int slice; + int subslice; + + memset(instdone, 0, sizeof(*instdone)); + + switch (INTEL_GEN(i915)) { + default: + instdone->instdone = + intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); + + if (engine->id != RCS0) + break; + + instdone->slice_common = + intel_uncore_read(uncore, GEN7_SC_INSTDONE); + for_each_instdone_slice_subslice(i915, sseu, slice, subslice) { + instdone->sampler[slice][subslice] = + read_subslice_reg(engine, slice, subslice, + GEN7_SAMPLER_INSTDONE); + instdone->row[slice][subslice] = + read_subslice_reg(engine, slice, subslice, + GEN7_ROW_INSTDONE); + } + break; + case 7: + instdone->instdone = + 
intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); + + if (engine->id != RCS0) + break; + + instdone->slice_common = + intel_uncore_read(uncore, GEN7_SC_INSTDONE); + instdone->sampler[0][0] = + intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE); + instdone->row[0][0] = + intel_uncore_read(uncore, GEN7_ROW_INSTDONE); + + break; + case 6: + case 5: + case 4: + instdone->instdone = + intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); + if (engine->id == RCS0) + /* HACK: Using the wrong struct member */ + instdone->slice_common = + intel_uncore_read(uncore, GEN4_INSTDONE1); + break; + case 3: + case 2: + instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE); + break; + } +} + +static bool ring_is_idle(struct intel_engine_cs *engine) +{ + bool idle = true; + + if (I915_SELFTEST_ONLY(!engine->mmio_base)) + return true; + + if (!intel_engine_pm_get_if_awake(engine)) + return true; + + /* First check that no commands are left in the ring */ + if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) != + (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR)) + idle = false; + + /* No bit for gen2, so assume the CS parser is idle */ + if (INTEL_GEN(engine->i915) > 2 && + !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE)) + idle = false; + + intel_engine_pm_put(engine); + + return idle; +} + +void intel_engine_flush_submission(struct intel_engine_cs *engine) +{ + struct tasklet_struct *t = &engine->execlists.tasklet; + + if (__tasklet_is_scheduled(t)) { + local_bh_disable(); + if (tasklet_trylock(t)) { + /* Must wait for any GPU reset in progress. */ + if (__tasklet_is_enabled(t)) + t->func(t->data); + tasklet_unlock(t); + } + local_bh_enable(); + } + + /* Otherwise flush the tasklet if it was running on another cpu */ + tasklet_unlock_wait(t); +} + +/** + * intel_engine_is_idle() - Report if the engine has finished process all work + * @engine: the intel_engine_cs + * + * Return true if there are no requests pending, nothing left to be submitted + * to hardware, and that the engine is idle. + */ +bool intel_engine_is_idle(struct intel_engine_cs *engine) +{ + /* More white lies, if wedged, hw state is inconsistent */ + if (intel_gt_is_wedged(engine->gt)) + return true; + + if (!intel_engine_pm_is_awake(engine)) + return true; + + /* Waiting to drain ELSP? */ + if (execlists_active(&engine->execlists)) { + synchronize_hardirq(engine->i915->drm.pdev->irq); + + intel_engine_flush_submission(engine); + + if (execlists_active(&engine->execlists)) + return false; + } + + /* ELSP is empty, but there are ready requests? E.g. after reset */ + if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) + return false; + + /* Ring stopped? */ + return ring_is_idle(engine); +} + +bool intel_engines_are_idle(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* + * If the driver is wedged, HW state may be very inconsistent and + * report that it is still busy, even though we have stopped using it. 
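+	 * Reporting idle here lets the parking and suspend paths make forward
+	 * progress rather than waiting on a dead GPU.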
+ */ + if (intel_gt_is_wedged(gt)) + return true; + + /* Already parked (and passed an idleness test); must still be idle */ + if (!READ_ONCE(gt->awake)) + return true; + + for_each_engine(engine, gt, id) { + if (!intel_engine_is_idle(engine)) + return false; + } + + return true; +} + +void intel_engines_reset_default_submission(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) + engine->set_default_submission(engine); +} + +bool intel_engine_can_store_dword(struct intel_engine_cs *engine) +{ + switch (INTEL_GEN(engine->i915)) { + case 2: + return false; /* uses physical not virtual addresses */ + case 3: + /* maybe only uses physical not virtual addresses */ + return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915)); + case 4: + return !IS_I965G(engine->i915); /* who knows! */ + case 6: + return engine->class != VIDEO_DECODE_CLASS; /* b0rked */ + default: + return true; + } +} + +static int print_sched_attr(struct drm_i915_private *i915, + const struct i915_sched_attr *attr, + char *buf, int x, int len) +{ + if (attr->priority == I915_PRIORITY_INVALID) + return x; + + x += snprintf(buf + x, len - x, + " prio=%d", attr->priority); + + return x; +} + +static void print_request(struct drm_printer *m, + struct i915_request *rq, + const char *prefix) +{ + const char *name = rq->fence.ops->get_timeline_name(&rq->fence); + char buf[80] = ""; + int x = 0; + + x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf)); + + drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n", + prefix, + rq->fence.context, rq->fence.seqno, + i915_request_completed(rq) ? "!" : + i915_request_started(rq) ? "*" : + "", + test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &rq->fence.flags) ? "+" : + test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &rq->fence.flags) ? "-" : + "", + buf, + jiffies_to_msecs(jiffies - rq->emitted_jiffies), + name); +} + +static void hexdump(struct drm_printer *m, const void *buf, size_t len) +{ + const size_t rowsize = 8 * sizeof(u32); + const void *prev = NULL; + bool skip = false; + size_t pos; + + for (pos = 0; pos < len; pos += rowsize) { + char line[128]; + + if (prev && !memcmp(prev, buf + pos, rowsize)) { + if (!skip) { + drm_printf(m, "*\n"); + skip = true; + } + continue; + } + + WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos, + rowsize, sizeof(u32), + line, sizeof(line), + false) >= sizeof(line)); + drm_printf(m, "[%04zx] %s\n", pos, line); + + prev = buf + pos; + skip = false; + } +} + +static struct intel_timeline *get_timeline(struct i915_request *rq) +{ + struct intel_timeline *tl; + + /* + * Even though we are holding the engine->active.lock here, there + * is no control over the submission queue per-se and we are + * inspecting the active state at a random point in time, with an + * unknown queue. Play safe and make sure the timeline remains valid. + * (Only being used for pretty printing, one extra kref shouldn't + * cause a camel stampede!) 
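+	 * kref_get_unless_zero() below provides that guarantee; if the
+	 * timeline is already on its way out we return NULL and the caller
+	 * simply prints a zero hwsp offset.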
+ */ + rcu_read_lock(); + tl = rcu_dereference(rq->timeline); + if (!kref_get_unless_zero(&tl->kref)) + tl = NULL; + rcu_read_unlock(); + + return tl; +} + +static const char *repr_timer(const struct timer_list *t) +{ + if (!READ_ONCE(t->expires)) + return "inactive"; + + if (timer_pending(t)) + return "active"; + + return "expired"; +} + +static void intel_engine_print_registers(struct intel_engine_cs *engine, + struct drm_printer *m) +{ + struct drm_i915_private *dev_priv = engine->i915; + struct intel_engine_execlists * const execlists = &engine->execlists; + u64 addr; + + if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7)) + drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID)); + drm_printf(m, "\tRING_START: 0x%08x\n", + ENGINE_READ(engine, RING_START)); + drm_printf(m, "\tRING_HEAD: 0x%08x\n", + ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR); + drm_printf(m, "\tRING_TAIL: 0x%08x\n", + ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR); + drm_printf(m, "\tRING_CTL: 0x%08x%s\n", + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : ""); + if (INTEL_GEN(engine->i915) > 2) { + drm_printf(m, "\tRING_MODE: 0x%08x%s\n", + ENGINE_READ(engine, RING_MI_MODE), + ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : ""); + } + + if (INTEL_GEN(dev_priv) >= 6) { + drm_printf(m, "\tRING_IMR: %08x\n", + ENGINE_READ(engine, RING_IMR)); + } + + addr = intel_engine_get_active_head(engine); + drm_printf(m, "\tACTHD: 0x%08x_%08x\n", + upper_32_bits(addr), lower_32_bits(addr)); + addr = intel_engine_get_last_batch_head(engine); + drm_printf(m, "\tBBADDR: 0x%08x_%08x\n", + upper_32_bits(addr), lower_32_bits(addr)); + if (INTEL_GEN(dev_priv) >= 8) + addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW); + else if (INTEL_GEN(dev_priv) >= 4) + addr = ENGINE_READ(engine, RING_DMA_FADD); + else + addr = ENGINE_READ(engine, DMA_FADD_I8XX); + drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n", + upper_32_bits(addr), lower_32_bits(addr)); + if (INTEL_GEN(dev_priv) >= 4) { + drm_printf(m, "\tIPEIR: 0x%08x\n", + ENGINE_READ(engine, RING_IPEIR)); + drm_printf(m, "\tIPEHR: 0x%08x\n", + ENGINE_READ(engine, RING_IPEHR)); + } else { + drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR)); + drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR)); + } + + if (HAS_EXECLISTS(dev_priv)) { + struct i915_request * const *port, *rq; + const u32 *hws = + &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; + const u8 num_entries = execlists->csb_size; + unsigned int idx; + u8 read, write; + + drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? 
%s\n", + yesno(test_bit(TASKLET_STATE_SCHED, + &engine->execlists.tasklet.state)), + enableddisabled(!atomic_read(&engine->execlists.tasklet.count)), + repr_timer(&engine->execlists.preempt), + repr_timer(&engine->execlists.timer)); + + read = execlists->csb_head; + write = READ_ONCE(*execlists->csb_write); + + drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n", + ENGINE_READ(engine, RING_EXECLIST_STATUS_LO), + ENGINE_READ(engine, RING_EXECLIST_STATUS_HI), + read, write, num_entries); + + if (read >= num_entries) + read = 0; + if (write >= num_entries) + write = 0; + if (read > write) + write += num_entries; + while (read < write) { + idx = ++read % num_entries; + drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n", + idx, hws[idx * 2], hws[idx * 2 + 1]); + } + + execlists_active_lock_bh(execlists); + rcu_read_lock(); + for (port = execlists->active; (rq = *port); port++) { + char hdr[80]; + int len; + + len = snprintf(hdr, sizeof(hdr), + "\t\tActive[%d]: ", + (int)(port - execlists->active)); + if (!i915_request_signaled(rq)) { + struct intel_timeline *tl = get_timeline(rq); + + len += snprintf(hdr + len, sizeof(hdr) - len, + "ring:{start:%08x, hwsp:%08x, seqno:%08x}, ", + i915_ggtt_offset(rq->ring->vma), + tl ? tl->hwsp_offset : 0, + hwsp_seqno(rq)); + + if (tl) + intel_timeline_put(tl); + } + snprintf(hdr + len, sizeof(hdr) - len, "rq: "); + print_request(m, rq, hdr); + } + for (port = execlists->pending; (rq = *port); port++) { + struct intel_timeline *tl = get_timeline(rq); + char hdr[80]; + + snprintf(hdr, sizeof(hdr), + "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ", + (int)(port - execlists->pending), + i915_ggtt_offset(rq->ring->vma), + tl ? tl->hwsp_offset : 0, + hwsp_seqno(rq)); + print_request(m, rq, hdr); + + if (tl) + intel_timeline_put(tl); + } + rcu_read_unlock(); + execlists_active_unlock_bh(execlists); + } else if (INTEL_GEN(dev_priv) > 6) { + drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", + ENGINE_READ(engine, RING_PP_DIR_BASE)); + drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n", + ENGINE_READ(engine, RING_PP_DIR_BASE_READ)); + drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n", + ENGINE_READ(engine, RING_PP_DIR_DCLV)); + } +} + +static void print_request_ring(struct drm_printer *m, struct i915_request *rq) +{ + void *ring; + int size; + + drm_printf(m, + "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n", + rq->head, rq->postfix, rq->tail, + rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, + rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); + + size = rq->tail - rq->head; + if (rq->tail < rq->head) + size += rq->ring->size; + + ring = kmalloc(size, GFP_ATOMIC); + if (ring) { + const void *vaddr = rq->ring->vaddr; + unsigned int head = rq->head; + unsigned int len = 0; + + if (rq->tail < head) { + len = rq->ring->size - head; + memcpy(ring, vaddr + head, len); + head = 0; + } + memcpy(ring + len, vaddr + head, size - len); + + hexdump(m, ring, size); + kfree(ring); + } +} + +void intel_engine_dump(struct intel_engine_cs *engine, + struct drm_printer *m, + const char *header, ...) +{ + struct i915_gpu_error * const error = &engine->i915->gpu_error; + struct i915_request *rq; + intel_wakeref_t wakeref; + unsigned long flags; + + if (header) { + va_list ap; + + va_start(ap, header); + drm_vprintf(m, header, &ap); + va_end(ap); + } + + if (intel_gt_is_wedged(engine->gt)) + drm_printf(m, "*** WEDGED ***\n"); + + drm_printf(m, "\tAwake? 
%d\n", atomic_read(&engine->wakeref.count)); + + rcu_read_lock(); + rq = READ_ONCE(engine->heartbeat.systole); + if (rq) + drm_printf(m, "\tHeartbeat: %d ms ago\n", + jiffies_to_msecs(jiffies - rq->emitted_jiffies)); + rcu_read_unlock(); + drm_printf(m, "\tReset count: %d (global %d)\n", + i915_reset_engine_count(error, engine), + i915_reset_count(error)); + + drm_printf(m, "\tRequests:\n"); + + spin_lock_irqsave(&engine->active.lock, flags); + rq = intel_engine_find_active_request(engine); + if (rq) { + struct intel_timeline *tl = get_timeline(rq); + + print_request(m, rq, "\t\tactive "); + + drm_printf(m, "\t\tring->start: 0x%08x\n", + i915_ggtt_offset(rq->ring->vma)); + drm_printf(m, "\t\tring->head: 0x%08x\n", + rq->ring->head); + drm_printf(m, "\t\tring->tail: 0x%08x\n", + rq->ring->tail); + drm_printf(m, "\t\tring->emit: 0x%08x\n", + rq->ring->emit); + drm_printf(m, "\t\tring->space: 0x%08x\n", + rq->ring->space); + + if (tl) { + drm_printf(m, "\t\tring->hwsp: 0x%08x\n", + tl->hwsp_offset); + intel_timeline_put(tl); + } + + print_request_ring(m, rq); + + if (rq->hw_context->lrc_reg_state) { + drm_printf(m, "Logical Ring Context:\n"); + hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE); + } + } + spin_unlock_irqrestore(&engine->active.lock, flags); + + drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base); + wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm); + if (wakeref) { + intel_engine_print_registers(engine, m); + intel_runtime_pm_put(engine->uncore->rpm, wakeref); + } else { + drm_printf(m, "\tDevice is asleep; skipping register dump\n"); + } + + intel_execlists_show_requests(engine, m, print_request, 8); + + drm_printf(m, "HWSP:\n"); + hexdump(m, engine->status_page.addr, PAGE_SIZE); + + drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine))); + + intel_engine_print_breadcrumbs(engine, m); +} + +/** + * intel_enable_engine_stats() - Enable engine busy tracking on engine + * @engine: engine to enable stats collection + * + * Start collecting the engine busyness data for @engine. + * + * Returns 0 on success or a negative error code. + */ +int intel_enable_engine_stats(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists *execlists = &engine->execlists; + unsigned long flags; + int err = 0; + + if (!intel_engine_supports_stats(engine)) + return -ENODEV; + + execlists_active_lock_bh(execlists); + write_seqlock_irqsave(&engine->stats.lock, flags); + + if (unlikely(engine->stats.enabled == ~0)) { + err = -EBUSY; + goto unlock; + } + + if (engine->stats.enabled++ == 0) { + struct i915_request * const *port; + struct i915_request *rq; + + engine->stats.enabled_at = ktime_get(); + + /* XXX submission method oblivious? */ + for (port = execlists->active; (rq = *port); port++) + engine->stats.active++; + + for (port = execlists->pending; (rq = *port); port++) { + /* Exclude any contexts already counted in active */ + if (!intel_context_inflight_count(rq->hw_context)) + engine->stats.active++; + } + + if (engine->stats.active) + engine->stats.start = engine->stats.enabled_at; + } + +unlock: + write_sequnlock_irqrestore(&engine->stats.lock, flags); + execlists_active_unlock_bh(execlists); + + return err; +} + +static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) +{ + ktime_t total = engine->stats.total; + + /* + * If the engine is executing something at the moment + * add it to the total. 
+ */ + if (engine->stats.active) + total = ktime_add(total, + ktime_sub(ktime_get(), engine->stats.start)); + + return total; +} + +/** + * intel_engine_get_busy_time() - Return current accumulated engine busyness + * @engine: engine to report on + * + * Returns accumulated time @engine was busy since engine stats were enabled. + */ +ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine) +{ + unsigned int seq; + ktime_t total; + + do { + seq = read_seqbegin(&engine->stats.lock); + total = __intel_engine_get_busy_time(engine); + } while (read_seqretry(&engine->stats.lock, seq)); + + return total; +} + +/** + * intel_disable_engine_stats() - Disable engine busy tracking on engine + * @engine: engine to disable stats collection + * + * Stops collecting the engine busyness data for @engine. + */ +void intel_disable_engine_stats(struct intel_engine_cs *engine) +{ + unsigned long flags; + + if (!intel_engine_supports_stats(engine)) + return; + + write_seqlock_irqsave(&engine->stats.lock, flags); + WARN_ON_ONCE(engine->stats.enabled == 0); + if (--engine->stats.enabled == 0) { + engine->stats.total = __intel_engine_get_busy_time(engine); + engine->stats.active = 0; + } + write_sequnlock_irqrestore(&engine->stats.lock, flags); +} + +static bool match_ring(struct i915_request *rq) +{ + u32 ring = ENGINE_READ(rq->engine, RING_START); + + return ring == i915_ggtt_offset(rq->ring->vma); +} + +struct i915_request * +intel_engine_find_active_request(struct intel_engine_cs *engine) +{ + struct i915_request *request, *active = NULL; + + /* + * We are called by the error capture, reset and to dump engine + * state at random points in time. In particular, note that neither is + * crucially ordered with an interrupt. After a hang, the GPU is dead + * and we assume that no more writes can happen (we waited long enough + * for all writes that were in transaction to be flushed) - adding an + * extra delay for a recent interrupt is pointless. Hence, we do + * not need an engine->irq_seqno_barrier() before the seqno reads. + * At all other times, we must assume the GPU is still running, but + * we only care about the snapshot of this moment. + */ + lockdep_assert_held(&engine->active.lock); + list_for_each_entry(request, &engine->active.requests, sched.link) { + if (i915_request_completed(request)) + continue; + + if (!i915_request_started(request)) + continue; + + /* More than one preemptible request may match! */ + if (!match_ring(request)) + continue; + + active = request; + break; + } + + return active; +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "mock_engine.c" +#include "selftest_engine.c" +#include "selftest_engine_cs.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c new file mode 100644 index 000000000000..06aa14c7aa8c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -0,0 +1,234 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_request.h" + +#include "intel_context.h" +#include "intel_engine_heartbeat.h" +#include "intel_engine_pm.h" +#include "intel_engine.h" +#include "intel_gt.h" +#include "intel_reset.h" + +/* + * While the engine is active, we send a periodic pulse along the engine + * to check on its health and to flush any idle-barriers. If that request + * is stuck, and we fail to preempt it, we declare the engine hung and + * issue a reset -- in the hope that restores progress. 
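+ *
+ * The pulse is escalated gradually: if the previous heartbeat request
+ * is still incomplete by the next tick, its priority is raised one
+ * step at a time, up to I915_PRIORITY_BARRIER, before we finally give
+ * up and ask intel_gt_handle_error() to reset the engine.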
+ */ + +static bool next_heartbeat(struct intel_engine_cs *engine) +{ + long delay; + + delay = READ_ONCE(engine->props.heartbeat_interval_ms); + if (!delay) + return false; + + delay = msecs_to_jiffies_timeout(delay); + if (delay >= HZ) + delay = round_jiffies_up_relative(delay); + schedule_delayed_work(&engine->heartbeat.work, delay); + + return true; +} + +static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq) +{ + engine->wakeref_serial = READ_ONCE(engine->serial) + 1; + i915_request_add_active_barriers(rq); +} + +static void show_heartbeat(const struct i915_request *rq, + struct intel_engine_cs *engine) +{ + struct drm_printer p = drm_debug_printer("heartbeat"); + + intel_engine_dump(engine, &p, + "%s heartbeat {prio:%d} not ticking\n", + engine->name, + rq->sched.attr.priority); +} + +static void heartbeat(struct work_struct *wrk) +{ + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN), + }; + struct intel_engine_cs *engine = + container_of(wrk, typeof(*engine), heartbeat.work.work); + struct intel_context *ce = engine->kernel_context; + struct i915_request *rq; + + if (!intel_engine_pm_get_if_awake(engine)) + return; + + rq = engine->heartbeat.systole; + if (rq && i915_request_completed(rq)) { + i915_request_put(rq); + engine->heartbeat.systole = NULL; + } + + if (intel_gt_is_wedged(engine->gt)) + goto out; + + if (engine->heartbeat.systole) { + if (engine->schedule && + rq->sched.attr.priority < I915_PRIORITY_BARRIER) { + /* + * Gradually raise the priority of the heartbeat to + * give high priority work [which presumably desires + * low latency and no jitter] the chance to naturally + * complete before being preempted. + */ + attr.priority = I915_PRIORITY_MASK; + if (rq->sched.attr.priority >= attr.priority) + attr.priority |= I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT); + if (rq->sched.attr.priority >= attr.priority) + attr.priority = I915_PRIORITY_BARRIER; + + local_bh_disable(); + engine->schedule(rq, &attr); + local_bh_enable(); + } else { + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + show_heartbeat(rq, engine); + + intel_gt_handle_error(engine->gt, engine->mask, + I915_ERROR_CAPTURE, + "stopped heartbeat on %s", + engine->name); + } + goto out; + } + + if (engine->wakeref_serial == engine->serial) + goto out; + + mutex_lock(&ce->timeline->mutex); + + intel_context_enter(ce); + rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN); + intel_context_exit(ce); + if (IS_ERR(rq)) + goto unlock; + + idle_pulse(engine, rq); + if (i915_modparams.enable_hangcheck) + engine->heartbeat.systole = i915_request_get(rq); + + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + +unlock: + mutex_unlock(&ce->timeline->mutex); +out: + if (!next_heartbeat(engine)) + i915_request_put(fetch_and_zero(&engine->heartbeat.systole)); + intel_engine_pm_put(engine); +} + +void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine) +{ + if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL)) + return; + + next_heartbeat(engine); +} + +void intel_engine_park_heartbeat(struct intel_engine_cs *engine) +{ + if (cancel_delayed_work(&engine->heartbeat.work)) + i915_request_put(fetch_and_zero(&engine->heartbeat.systole)); +} + +void intel_engine_init_heartbeat(struct intel_engine_cs *engine) +{ + INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat); +} + +int intel_engine_set_heartbeat(struct intel_engine_cs *engine, + unsigned long delay) +{ + int err; + + /* Send one last pulse before to cleanup persistent hogs */ + if (!delay && 
IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) { + err = intel_engine_pulse(engine); + if (err) + return err; + } + + WRITE_ONCE(engine->props.heartbeat_interval_ms, delay); + + if (intel_engine_pm_get_if_awake(engine)) { + if (delay) + intel_engine_unpark_heartbeat(engine); + else + intel_engine_park_heartbeat(engine); + intel_engine_pm_put(engine); + } + + return 0; +} + +int intel_engine_pulse(struct intel_engine_cs *engine) +{ + struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER }; + struct intel_context *ce = engine->kernel_context; + struct i915_request *rq; + int err = 0; + + if (!intel_engine_has_preemption(engine)) + return -ENODEV; + + if (!intel_engine_pm_get_if_awake(engine)) + return 0; + + if (mutex_lock_interruptible(&ce->timeline->mutex)) + goto out_rpm; + + intel_context_enter(ce); + rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN); + intel_context_exit(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_unlock; + } + + rq->flags |= I915_REQUEST_SENTINEL; + idle_pulse(engine, rq); + + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + +out_unlock: + mutex_unlock(&ce->timeline->mutex); +out_rpm: + intel_engine_pm_put(engine); + return err; +} + +int intel_engine_flush_barriers(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + if (llist_empty(&engine->barrier_tasks)) + return 0; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + idle_pulse(engine, rq); + i915_request_add(rq); + + return 0; +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_engine_heartbeat.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h new file mode 100644 index 000000000000..a7b8c0f9e005 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_ENGINE_HEARTBEAT_H +#define INTEL_ENGINE_HEARTBEAT_H + +struct intel_engine_cs; + +void intel_engine_init_heartbeat(struct intel_engine_cs *engine); + +int intel_engine_set_heartbeat(struct intel_engine_cs *engine, + unsigned long delay); + +void intel_engine_park_heartbeat(struct intel_engine_cs *engine); +void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine); + +int intel_engine_pulse(struct intel_engine_cs *engine); +int intel_engine_flush_barriers(struct intel_engine_cs *engine); + +#endif /* INTEL_ENGINE_HEARTBEAT_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c new file mode 100644 index 000000000000..874d82677179 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -0,0 +1,199 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" + +#include "intel_engine.h" +#include "intel_engine_heartbeat.h" +#include "intel_engine_pm.h" +#include "intel_engine_pool.h" +#include "intel_gt.h" +#include "intel_gt_pm.h" +#include "intel_rc6.h" +#include "intel_ring.h" + +static int __engine_unpark(struct intel_wakeref *wf) +{ + struct intel_engine_cs *engine = + container_of(wf, typeof(*engine), wakeref); + void *map; + + GEM_TRACE("%s\n", engine->name); + + intel_gt_pm_get(engine->gt); + + /* Pin the default state for fast resets from atomic context. 
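+	 * Pinning is best-effort: if there is no default state, or the pin
+	 * fails, pinned_default_state is simply left NULL.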
*/ + map = NULL; + if (engine->default_state) + map = i915_gem_object_pin_map(engine->default_state, + I915_MAP_WB); + if (!IS_ERR_OR_NULL(map)) + engine->pinned_default_state = map; + + if (engine->unpark) + engine->unpark(engine); + + intel_engine_unpark_heartbeat(engine); + return 0; +} + +#if IS_ENABLED(CONFIG_LOCKDEP) + +static inline unsigned long __timeline_mark_lock(struct intel_context *ce) +{ + unsigned long flags; + + local_irq_save(flags); + mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_); + + return flags; +} + +static inline void __timeline_mark_unlock(struct intel_context *ce, + unsigned long flags) +{ + mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_); + local_irq_restore(flags); +} + +#else + +static inline unsigned long __timeline_mark_lock(struct intel_context *ce) +{ + return 0; +} + +static inline void __timeline_mark_unlock(struct intel_context *ce, + unsigned long flags) +{ +} + +#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */ + +static bool switch_to_kernel_context(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + unsigned long flags; + bool result = true; + + /* Already inside the kernel context, safe to power down. */ + if (engine->wakeref_serial == engine->serial) + return true; + + /* GPU is pointing to the void, as good as in the kernel context. */ + if (intel_gt_is_wedged(engine->gt)) + return true; + + /* + * Note, we do this without taking the timeline->mutex. We cannot + * as we may be called while retiring the kernel context and so + * already underneath the timeline->mutex. Instead we rely on the + * exclusive property of the __engine_park that prevents anyone + * else from creating a request on this engine. This also requires + * that the ring is empty and we avoid any waits while constructing + * the context, as they assume protection by the timeline->mutex. + * This should hold true as we can only park the engine after + * retiring the last request, thus all rings should be empty and + * all timelines idle. + */ + flags = __timeline_mark_lock(engine->kernel_context); + + rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT); + if (IS_ERR(rq)) + /* Context switch failed, hope for the best! Maybe reset? */ + goto out_unlock; + + intel_timeline_enter(i915_request_timeline(rq)); + + /* Check again on the next retirement. */ + engine->wakeref_serial = engine->serial + 1; + i915_request_add_active_barriers(rq); + + /* Install ourselves as a preemption barrier */ + rq->sched.attr.priority = I915_PRIORITY_BARRIER; + __i915_request_commit(rq); + + /* Release our exclusive hold on the engine */ + __intel_wakeref_defer_park(&engine->wakeref); + __i915_request_queue(rq, NULL); + + result = false; +out_unlock: + __timeline_mark_unlock(engine->kernel_context, flags); + return result; +} + +static void call_idle_barriers(struct intel_engine_cs *engine) +{ + struct llist_node *node, *next; + + llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) { + struct dma_fence_cb *cb = + container_of((struct list_head *)node, + typeof(*cb), node); + + cb->func(NULL, cb); + } +} + +static int __engine_park(struct intel_wakeref *wf) +{ + struct intel_engine_cs *engine = + container_of(wf, typeof(*engine), wakeref); + + engine->saturated = 0; + + /* + * If one and only one request is completed between pm events, + * we know that we are inside the kernel context and it is + * safe to power down. 
(We are paranoid in case that runtime + * suspend causes corruption to the active context image, and + * want to avoid that impacting userspace.) + */ + if (!switch_to_kernel_context(engine)) + return -EBUSY; + + GEM_TRACE("%s\n", engine->name); + + call_idle_barriers(engine); /* cleanup after wedging */ + + intel_engine_park_heartbeat(engine); + intel_engine_disarm_breadcrumbs(engine); + intel_engine_pool_park(&engine->pool); + + /* Must be reset upon idling, or we may miss the busy wakeup. */ + GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN); + + if (engine->park) + engine->park(engine); + + if (engine->pinned_default_state) { + i915_gem_object_unpin_map(engine->default_state); + engine->pinned_default_state = NULL; + } + + engine->execlists.no_priolist = false; + + intel_gt_pm_put(engine->gt); + return 0; +} + +static const struct intel_wakeref_ops wf_ops = { + .get = __engine_unpark, + .put = __engine_park, +}; + +void intel_engine_init__pm(struct intel_engine_cs *engine) +{ + struct intel_runtime_pm *rpm = engine->uncore->rpm; + + intel_wakeref_init(&engine->wakeref, rpm, &wf_ops); + intel_engine_init_heartbeat(engine); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_engine_pm.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h new file mode 100644 index 000000000000..739c50fefcef --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_ENGINE_PM_H +#define INTEL_ENGINE_PM_H + +#include "intel_engine_types.h" +#include "intel_wakeref.h" + +static inline bool +intel_engine_pm_is_awake(const struct intel_engine_cs *engine) +{ + return intel_wakeref_is_active(&engine->wakeref); +} + +static inline void intel_engine_pm_get(struct intel_engine_cs *engine) +{ + intel_wakeref_get(&engine->wakeref); +} + +static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine) +{ + return intel_wakeref_get_if_active(&engine->wakeref); +} + +static inline void intel_engine_pm_put(struct intel_engine_cs *engine) +{ + intel_wakeref_put(&engine->wakeref); +} + +void intel_engine_init__pm(struct intel_engine_cs *engine); + +#endif /* INTEL_ENGINE_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c new file mode 100644 index 000000000000..397186818305 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c @@ -0,0 +1,190 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#include "gem/i915_gem_object.h" + +#include "i915_drv.h" +#include "intel_engine_pm.h" +#include "intel_engine_pool.h" + +static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool) +{ + return container_of(pool, struct intel_engine_cs, pool); +} + +static struct list_head * +bucket_for_size(struct intel_engine_pool *pool, size_t sz) +{ + int n; + + /* + * Compute a power-of-two bucket, but throw everything greater than + * 16KiB into the same bucket: i.e. the buckets hold objects of + * (1 page, 2 pages, 4 pages, 8+ pages). 
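+	 *
+	 * For example, with 4KiB pages: an 8KiB object gives fls(2) - 1 = 1,
+	 * i.e. bucket 1, while a 64KiB object gives fls(16) - 1 = 4, which is
+	 * clamped to the last bucket (3).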
+ */ + n = fls(sz >> PAGE_SHIFT) - 1; + if (n >= ARRAY_SIZE(pool->cache_list)) + n = ARRAY_SIZE(pool->cache_list) - 1; + + return &pool->cache_list[n]; +} + +static void node_free(struct intel_engine_pool_node *node) +{ + i915_gem_object_put(node->obj); + i915_active_fini(&node->active); + kfree(node); +} + +static int pool_active(struct i915_active *ref) +{ + struct intel_engine_pool_node *node = + container_of(ref, typeof(*node), active); + struct dma_resv *resv = node->obj->base.resv; + int err; + + if (dma_resv_trylock(resv)) { + dma_resv_add_excl_fence(resv, NULL); + dma_resv_unlock(resv); + } + + err = i915_gem_object_pin_pages(node->obj); + if (err) + return err; + + /* Hide this pinned object from the shrinker until retired */ + i915_gem_object_make_unshrinkable(node->obj); + + return 0; +} + +__i915_active_call +static void pool_retire(struct i915_active *ref) +{ + struct intel_engine_pool_node *node = + container_of(ref, typeof(*node), active); + struct intel_engine_pool *pool = node->pool; + struct list_head *list = bucket_for_size(pool, node->obj->base.size); + unsigned long flags; + + GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool))); + + i915_gem_object_unpin_pages(node->obj); + + /* Return this object to the shrinker pool */ + i915_gem_object_make_purgeable(node->obj); + + spin_lock_irqsave(&pool->lock, flags); + list_add(&node->link, list); + spin_unlock_irqrestore(&pool->lock, flags); +} + +static struct intel_engine_pool_node * +node_create(struct intel_engine_pool *pool, size_t sz) +{ + struct intel_engine_cs *engine = to_engine(pool); + struct intel_engine_pool_node *node; + struct drm_i915_gem_object *obj; + + node = kmalloc(sizeof(*node), + GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + if (!node) + return ERR_PTR(-ENOMEM); + + node->pool = pool; + i915_active_init(&node->active, pool_active, pool_retire); + + obj = i915_gem_object_create_internal(engine->i915, sz); + if (IS_ERR(obj)) { + i915_active_fini(&node->active); + kfree(node); + return ERR_CAST(obj); + } + + i915_gem_object_set_readonly(obj); + + node->obj = obj; + return node; +} + +static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine) +{ + if (intel_engine_is_virtual(engine)) + engine = intel_virtual_engine_get_sibling(engine, 0); + + GEM_BUG_ON(!engine); + return &engine->pool; +} + +struct intel_engine_pool_node * +intel_engine_get_pool(struct intel_engine_cs *engine, size_t size) +{ + struct intel_engine_pool *pool = lookup_pool(engine); + struct intel_engine_pool_node *node; + struct list_head *list; + unsigned long flags; + int ret; + + GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool))); + + size = PAGE_ALIGN(size); + list = bucket_for_size(pool, size); + + spin_lock_irqsave(&pool->lock, flags); + list_for_each_entry(node, list, link) { + if (node->obj->base.size < size) + continue; + list_del(&node->link); + break; + } + spin_unlock_irqrestore(&pool->lock, flags); + + if (&node->link == list) { + node = node_create(pool, size); + if (IS_ERR(node)) + return node; + } + + ret = i915_active_acquire(&node->active); + if (ret) { + node_free(node); + return ERR_PTR(ret); + } + + return node; +} + +void intel_engine_pool_init(struct intel_engine_pool *pool) +{ + int n; + + spin_lock_init(&pool->lock); + for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) + INIT_LIST_HEAD(&pool->cache_list[n]); +} + +void intel_engine_pool_park(struct intel_engine_pool *pool) +{ + int n; + + for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { + struct list_head *list = 
&pool->cache_list[n]; + struct intel_engine_pool_node *node, *nn; + + list_for_each_entry_safe(node, nn, list, link) + node_free(node); + + INIT_LIST_HEAD(list); + } +} + +void intel_engine_pool_fini(struct intel_engine_pool *pool) +{ + int n; + + for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) + GEM_BUG_ON(!list_empty(&pool->cache_list[n])); +} diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h new file mode 100644 index 000000000000..1bd89cadc3b7 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#ifndef INTEL_ENGINE_POOL_H +#define INTEL_ENGINE_POOL_H + +#include "intel_engine_pool_types.h" +#include "i915_active.h" +#include "i915_request.h" + +struct intel_engine_pool_node * +intel_engine_get_pool(struct intel_engine_cs *engine, size_t size); + +static inline int +intel_engine_pool_mark_active(struct intel_engine_pool_node *node, + struct i915_request *rq) +{ + return i915_active_add_request(&node->active, rq); +} + +static inline void +intel_engine_pool_put(struct intel_engine_pool_node *node) +{ + i915_active_release(&node->active); +} + +void intel_engine_pool_init(struct intel_engine_pool *pool); +void intel_engine_pool_park(struct intel_engine_pool *pool); +void intel_engine_pool_fini(struct intel_engine_pool *pool); + +#endif /* INTEL_ENGINE_POOL_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h new file mode 100644 index 000000000000..e31ee361b76f --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#ifndef INTEL_ENGINE_POOL_TYPES_H +#define INTEL_ENGINE_POOL_TYPES_H + +#include <linux/list.h> +#include <linux/spinlock.h> + +#include "i915_active_types.h" + +struct drm_i915_gem_object; + +struct intel_engine_pool { + spinlock_t lock; + struct list_head cache_list[4]; +}; + +struct intel_engine_pool_node { + struct i915_active active; + struct drm_i915_gem_object *obj; + struct list_head link; + struct intel_engine_pool *pool; +}; + +#endif /* INTEL_ENGINE_POOL_TYPES_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h new file mode 100644 index 000000000000..758f0e8ec672 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -0,0 +1,593 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_ENGINE_TYPES__ +#define __INTEL_ENGINE_TYPES__ + +#include <linux/hashtable.h> +#include <linux/irq_work.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/llist.h> +#include <linux/rbtree.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include "i915_gem.h" +#include "i915_pmu.h" +#include "i915_priolist_types.h" +#include "i915_selftest.h" +#include "intel_engine_pool_types.h" +#include "intel_sseu.h" +#include "intel_timeline_types.h" +#include "intel_wakeref.h" +#include "intel_workarounds_types.h" + +/* Legacy HW Engine ID */ + +#define RCS0_HW 0 +#define VCS0_HW 1 +#define BCS0_HW 2 +#define VECS0_HW 3 +#define VCS1_HW 4 +#define VCS2_HW 6 +#define VCS3_HW 7 +#define VECS1_HW 12 + +/* Gen11+ HW Engine class + instance */ +#define RENDER_CLASS 0 +#define VIDEO_DECODE_CLASS 1 +#define VIDEO_ENHANCEMENT_CLASS 2 +#define COPY_ENGINE_CLASS 3 
+#define OTHER_CLASS 4 +#define MAX_ENGINE_CLASS 4 +#define MAX_ENGINE_INSTANCE 3 + +#define I915_MAX_SLICES 3 +#define I915_MAX_SUBSLICES 8 + +#define I915_CMD_HASH_ORDER 9 + +struct dma_fence; +struct drm_i915_gem_object; +struct drm_i915_reg_table; +struct i915_gem_context; +struct i915_request; +struct i915_sched_attr; +struct intel_gt; +struct intel_ring; +struct intel_uncore; + +typedef u8 intel_engine_mask_t; +#define ALL_ENGINES ((intel_engine_mask_t)~0ul) + +struct intel_hw_status_page { + struct i915_vma *vma; + u32 *addr; +}; + +struct intel_instdone { + u32 instdone; + /* The following exist only in the RCS engine */ + u32 slice_common; + u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES]; + u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES]; +}; + +/* + * we use a single page to load ctx workarounds so all of these + * values are referred in terms of dwords + * + * struct i915_wa_ctx_bb: + * offset: specifies batch starting position, also helpful in case + * if we want to have multiple batches at different offsets based on + * some criteria. It is not a requirement at the moment but provides + * an option for future use. + * size: size of the batch in DWORDS + */ +struct i915_ctx_workarounds { + struct i915_wa_ctx_bb { + u32 offset; + u32 size; + } indirect_ctx, per_ctx; + struct i915_vma *vma; +}; + +#define I915_MAX_VCS 4 +#define I915_MAX_VECS 2 + +/* + * Engine IDs definitions. + * Keep instances of the same type engine together. + */ +enum intel_engine_id { + RCS0 = 0, + BCS0, + VCS0, + VCS1, + VCS2, + VCS3, +#define _VCS(n) (VCS0 + (n)) + VECS0, + VECS1, +#define _VECS(n) (VECS0 + (n)) + I915_NUM_ENGINES +#define INVALID_ENGINE ((enum intel_engine_id)-1) +}; + +struct st_preempt_hang { + struct completion completion; + unsigned int count; + bool inject_hang; +}; + +/** + * struct intel_engine_execlists - execlist submission queue and port state + * + * The struct intel_engine_execlists represents the combined logical state of + * driver and the hardware state for execlist mode of submission. + */ +struct intel_engine_execlists { + /** + * @tasklet: softirq tasklet for bottom handler + */ + struct tasklet_struct tasklet; + + /** + * @timer: kick the current context if its timeslice expires + */ + struct timer_list timer; + + /** + * @preempt: reset the current context if it fails to give way + */ + struct timer_list preempt; + + /** + * @default_priolist: priority list for I915_PRIORITY_NORMAL + */ + struct i915_priolist default_priolist; + + /** + * @no_priolist: priority lists disabled + */ + bool no_priolist; + + /** + * @submit_reg: gen-specific execlist submission register + * set to the ExecList Submission Port (elsp) register pre-Gen11 and to + * the ExecList Submission Queue Contents register array for Gen11+ + */ + u32 __iomem *submit_reg; + + /** + * @ctrl_reg: the enhanced execlists control register, used to load the + * submit queue on the HW and to request preemptions to idle + */ + u32 __iomem *ctrl_reg; + +#define EXECLIST_MAX_PORTS 2 + /** + * @active: the currently known context executing on HW + */ + struct i915_request * const *active; + /** + * @inflight: the set of contexts submitted and acknowleged by HW + * + * The set of inflight contexts is managed by reading CS events + * from the HW. On a context-switch event (not preemption), we + * know the HW has transitioned from port0 to port1, and we + * advance our inflight/active tracking accordingly. 
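+	 *
+	 * Both @inflight and @pending carry a trailing NULL sentinel (hence
+	 * the extra array slot), so consumers can walk them with a simple
+	 * for (port = execlists->active; (rq = *port); port++) loop, as the
+	 * engine state dump does.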
+ */ + struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */]; + /** + * @pending: the next set of contexts submitted to ELSP + * + * We store the array of contexts that we submit to HW (via ELSP) and + * promote them to the inflight array once HW has signaled the + * preemption or idle-to-active event. + */ + struct i915_request *pending[EXECLIST_MAX_PORTS + 1]; + + /** + * @port_mask: number of execlist ports - 1 + */ + unsigned int port_mask; + + /** + * @switch_priority_hint: Second context priority. + * + * We submit multiple contexts to the HW simultaneously and would + * like to occasionally switch between them to emulate timeslicing. + * To know when timeslicing is suitable, we track the priority of + * the context submitted second. + */ + int switch_priority_hint; + + /** + * @queue_priority_hint: Highest pending priority. + * + * When we add requests into the queue, or adjust the priority of + * executing requests, we compute the maximum priority of those + * pending requests. We can then use this value to determine if + * we need to preempt the executing requests to service the queue. + * However, since the we may have recorded the priority of an inflight + * request we wanted to preempt but since completed, at the time of + * dequeuing the priority hint may no longer may match the highest + * available request priority. + */ + int queue_priority_hint; + + /** + * @queue: queue of requests, in priority lists + */ + struct rb_root_cached queue; + struct rb_root_cached virtual; + + /** + * @csb_write: control register for Context Switch buffer + * + * Note this register may be either mmio or HWSP shadow. + */ + u32 *csb_write; + + /** + * @csb_status: status array for Context Switch buffer + * + * Note these register may be either mmio or HWSP shadow. + */ + u32 *csb_status; + + /** + * @csb_size: context status buffer FIFO size + */ + u8 csb_size; + + /** + * @csb_head: context status buffer head + */ + u8 csb_head; + + I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;) +}; + +#define INTEL_ENGINE_CS_MAX_NAME 8 + +struct intel_engine_cs { + struct drm_i915_private *i915; + struct intel_gt *gt; + struct intel_uncore *uncore; + char name[INTEL_ENGINE_CS_MAX_NAME]; + + enum intel_engine_id id; + enum intel_engine_id legacy_idx; + + unsigned int hw_id; + unsigned int guc_id; + + intel_engine_mask_t mask; + + u8 class; + u8 instance; + + u8 uabi_class; + u8 uabi_instance; + + u32 uabi_capabilities; + u32 context_size; + u32 mmio_base; + + unsigned int context_tag; +#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS) + + struct rb_node uabi_node; + + struct intel_sseu sseu; + + struct { + spinlock_t lock; + struct list_head requests; + } active; + + struct llist_head barrier_tasks; + + struct intel_context *kernel_context; /* pinned */ + + intel_engine_mask_t saturated; /* submitting semaphores too late? */ + + struct { + struct delayed_work work; + struct i915_request *systole; + } heartbeat; + + unsigned long serial; + + unsigned long wakeref_serial; + struct intel_wakeref wakeref; + struct drm_i915_gem_object *default_state; + void *pinned_default_state; + + struct { + struct intel_ring *ring; + struct intel_timeline *timeline; + } legacy; + + /* Rather than have every client wait upon all user interrupts, + * with the herd waking after every interrupt and each doing the + * heavyweight seqno dance, we delegate the task (of being the + * bottom-half of the user interrupt) to the first client. 
After + * every interrupt, we wake up one client, who does the heavyweight + * coherent seqno read and either goes back to sleep (if incomplete), + * or wakes up all the completed clients in parallel, before then + * transferring the bottom-half status to the next client in the queue. + * + * Compared to walking the entire list of waiters in a single dedicated + * bottom-half, we reduce the latency of the first waiter by avoiding + * a context switch, but incur additional coherent seqno reads when + * following the chain of request breadcrumbs. Since it is most likely + * that we have a single client waiting on each seqno, then reducing + * the overhead of waking that client is much preferred. + */ + struct intel_breadcrumbs { + spinlock_t irq_lock; + struct list_head signalers; + + struct irq_work irq_work; /* for use from inside irq_lock */ + + unsigned int irq_enabled; + + bool irq_armed; + } breadcrumbs; + + struct intel_engine_pmu { + /** + * @enable: Bitmask of enable sample events on this engine. + * + * Bits correspond to sample event types, for instance + * I915_SAMPLE_QUEUED is bit 0 etc. + */ + u32 enable; + /** + * @enable_count: Reference count for the enabled samplers. + * + * Index number corresponds to @enum drm_i915_pmu_engine_sample. + */ + unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT]; + /** + * @sample: Counter values for sampling events. + * + * Our internal timer stores the current counters in this field. + * + * Index number corresponds to @enum drm_i915_pmu_engine_sample. + */ + struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT]; + } pmu; + + /* + * A pool of objects to use as shadow copies of client batch buffers + * when the command parser is enabled. Prevents the client from + * modifying the batch contents after software parsing. + */ + struct intel_engine_pool pool; + + struct intel_hw_status_page status_page; + struct i915_ctx_workarounds wa_ctx; + struct i915_wa_list ctx_wa_list; + struct i915_wa_list wa_list; + struct i915_wa_list whitelist; + + u32 irq_keep_mask; /* always keep these interrupts */ + u32 irq_enable_mask; /* bitmask to enable ring interrupt */ + void (*irq_enable)(struct intel_engine_cs *engine); + void (*irq_disable)(struct intel_engine_cs *engine); + + int (*resume)(struct intel_engine_cs *engine); + + struct { + void (*prepare)(struct intel_engine_cs *engine); + void (*reset)(struct intel_engine_cs *engine, bool stalled); + void (*finish)(struct intel_engine_cs *engine); + } reset; + + void (*park)(struct intel_engine_cs *engine); + void (*unpark)(struct intel_engine_cs *engine); + + void (*set_default_submission)(struct intel_engine_cs *engine); + + const struct intel_context_ops *cops; + + int (*request_alloc)(struct i915_request *rq); + + int (*emit_flush)(struct i915_request *request, u32 mode); +#define EMIT_INVALIDATE BIT(0) +#define EMIT_FLUSH BIT(1) +#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH) + int (*emit_bb_start)(struct i915_request *rq, + u64 offset, u32 length, + unsigned int dispatch_flags); +#define I915_DISPATCH_SECURE BIT(0) +#define I915_DISPATCH_PINNED BIT(1) + int (*emit_init_breadcrumb)(struct i915_request *rq); + u32 *(*emit_fini_breadcrumb)(struct i915_request *rq, + u32 *cs); + unsigned int emit_fini_breadcrumb_dw; + + /* Pass the request to the hardware queue (e.g. directly into + * the legacy ringbuffer or to the end of an execlist). + * + * This is called from an atomic context with irqs disabled; must + * be irq safe. 
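+	 * (For the execlists backend this typically amounts to queueing the
+	 * request and kicking the submission tasklet.)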
+ */ + void (*submit_request)(struct i915_request *rq); + + /* + * Called on signaling of a SUBMIT_FENCE, passing along the signaling + * request down to the bonded pairs. + */ + void (*bond_execute)(struct i915_request *rq, + struct dma_fence *signal); + + /* + * Call when the priority on a request has changed and it and its + * dependencies may need rescheduling. Note the request itself may + * not be ready to run! + */ + void (*schedule)(struct i915_request *request, + const struct i915_sched_attr *attr); + + /* + * Cancel all requests on the hardware, or queued for execution. + * This should only cancel the ready requests that have been + * submitted to the engine (via the engine->submit_request callback). + * This is called when marking the device as wedged. + */ + void (*cancel_requests)(struct intel_engine_cs *engine); + + void (*destroy)(struct intel_engine_cs *engine); + + struct intel_engine_execlists execlists; + + /* status_notifier: list of callbacks for context-switch changes */ + struct atomic_notifier_head context_status_notifier; + +#define I915_ENGINE_USING_CMD_PARSER BIT(0) +#define I915_ENGINE_SUPPORTS_STATS BIT(1) +#define I915_ENGINE_HAS_PREEMPTION BIT(2) +#define I915_ENGINE_HAS_SEMAPHORES BIT(3) +#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4) +#define I915_ENGINE_IS_VIRTUAL BIT(5) +#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6) +#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7) + unsigned int flags; + + /* + * Table of commands the command parser needs to know about + * for this engine. + */ + DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER); + + /* + * Table of registers allowed in commands that read/write registers. + */ + const struct drm_i915_reg_table *reg_tables; + int reg_table_count; + + /* + * Returns the bitmask for the length field of the specified command. + * Return 0 for an unrecognized/invalid command. + * + * If the command parser finds an entry for a command in the engine's + * cmd_tables, it gets the command's length based on the table entry. + * If not, it calls this function to determine the per-engine length + * field encoding for the command (i.e. different opcode ranges use + * certain bits to encode the command length in the header). + */ + u32 (*get_cmd_length_mask)(u32 cmd_header); + + struct { + /** + * @lock: Lock protecting the below fields. + */ + seqlock_t lock; + /** + * @enabled: Reference count indicating number of listeners. + */ + unsigned int enabled; + /** + * @active: Number of contexts currently scheduled in. + */ + unsigned int active; + /** + * @enabled_at: Timestamp when busy stats were enabled. + */ + ktime_t enabled_at; + /** + * @start: Timestamp of the last idle to active transition. + * + * Idle is defined as active == 0, active is active > 0. + */ + ktime_t start; + /** + * @total: Total time this engine was busy. + * + * Accumulated time not counting the most recent block in cases + * where engine is currently busy (active > 0). 
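+		 *
+		 * Readers fold the open block back in via
+		 * __intel_engine_get_busy_time(), which adds the time since
+		 * @start whenever @active is non-zero; callers sample it under
+		 * the @lock seqlock.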
+ */ + ktime_t total; + } stats; + + struct { + unsigned long heartbeat_interval_ms; + unsigned long preempt_timeout_ms; + unsigned long stop_timeout_ms; + unsigned long timeslice_duration_ms; + } props; +}; + +static inline bool +intel_engine_using_cmd_parser(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_USING_CMD_PARSER; +} + +static inline bool +intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER; +} + +static inline bool +intel_engine_supports_stats(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_SUPPORTS_STATS; +} + +static inline bool +intel_engine_has_preemption(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_HAS_PREEMPTION; +} + +static inline bool +intel_engine_has_semaphores(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_HAS_SEMAPHORES; +} + +static inline bool +intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; +} + +static inline bool +intel_engine_is_virtual(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_IS_VIRTUAL; +} + +static inline bool +intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine) +{ + return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO; +} + +#define instdone_has_slice(dev_priv___, sseu___, slice___) \ + ((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___)) + +#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \ + (IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \ + intel_sseu_has_subslice(sseu__, 0, subslice__)) + +#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \ + for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \ + (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \ + (slice_) += ((subslice_) == 0)) \ + for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \ + (instdone_has_subslice(dev_priv_, sseu_, slice_, \ + subslice_))) +#endif /* __INTEL_ENGINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c new file mode 100644 index 000000000000..7f7150a733f4 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c @@ -0,0 +1,295 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include <linux/list.h> +#include <linux/list_sort.h> +#include <linux/llist.h> + +#include "i915_drv.h" +#include "intel_engine.h" +#include "intel_engine_user.h" + +struct intel_engine_cs * +intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance) +{ + struct rb_node *p = i915->uabi_engines.rb_node; + + while (p) { + struct intel_engine_cs *it = + rb_entry(p, typeof(*it), uabi_node); + + if (class < it->uabi_class) + p = p->rb_left; + else if (class > it->uabi_class || + instance > it->uabi_instance) + p = p->rb_right; + else if (instance < it->uabi_instance) + p = p->rb_left; + else + return it; + } + + return NULL; +} + +void intel_engine_add_user(struct intel_engine_cs *engine) +{ + llist_add((struct llist_node *)&engine->uabi_node, + (struct llist_head *)&engine->i915->uabi_engines); +} + +static const u8 uabi_classes[] = { + [RENDER_CLASS] = I915_ENGINE_CLASS_RENDER, + [COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY, + [VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO, + [VIDEO_ENHANCEMENT_CLASS] = 
I915_ENGINE_CLASS_VIDEO_ENHANCE, +}; + +static int engine_cmp(void *priv, struct list_head *A, struct list_head *B) +{ + const struct intel_engine_cs *a = + container_of((struct rb_node *)A, typeof(*a), uabi_node); + const struct intel_engine_cs *b = + container_of((struct rb_node *)B, typeof(*b), uabi_node); + + if (uabi_classes[a->class] < uabi_classes[b->class]) + return -1; + if (uabi_classes[a->class] > uabi_classes[b->class]) + return 1; + + if (a->instance < b->instance) + return -1; + if (a->instance > b->instance) + return 1; + + return 0; +} + +static struct llist_node *get_engines(struct drm_i915_private *i915) +{ + return llist_del_all((struct llist_head *)&i915->uabi_engines); +} + +static void sort_engines(struct drm_i915_private *i915, + struct list_head *engines) +{ + struct llist_node *pos, *next; + + llist_for_each_safe(pos, next, get_engines(i915)) { + struct intel_engine_cs *engine = + container_of((struct rb_node *)pos, typeof(*engine), + uabi_node); + list_add((struct list_head *)&engine->uabi_node, engines); + } + list_sort(NULL, engines, engine_cmp); +} + +static void set_scheduler_caps(struct drm_i915_private *i915) +{ + static const struct { + u8 engine; + u8 sched; + } map[] = { +#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) } + MAP(HAS_PREEMPTION, PREEMPTION), + MAP(HAS_SEMAPHORES, SEMAPHORES), + MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS), +#undef MAP + }; + struct intel_engine_cs *engine; + u32 enabled, disabled; + + enabled = 0; + disabled = 0; + for_each_uabi_engine(engine, i915) { /* all engines must agree! */ + int i; + + if (engine->schedule) + enabled |= (I915_SCHEDULER_CAP_ENABLED | + I915_SCHEDULER_CAP_PRIORITY); + else + disabled |= (I915_SCHEDULER_CAP_ENABLED | + I915_SCHEDULER_CAP_PRIORITY); + + for (i = 0; i < ARRAY_SIZE(map); i++) { + if (engine->flags & BIT(map[i].engine)) + enabled |= BIT(map[i].sched); + else + disabled |= BIT(map[i].sched); + } + } + + i915->caps.scheduler = enabled & ~disabled; + if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED)) + i915->caps.scheduler = 0; +} + +const char *intel_engine_class_repr(u8 class) +{ + static const char * const uabi_names[] = { + [RENDER_CLASS] = "rcs", + [COPY_ENGINE_CLASS] = "bcs", + [VIDEO_DECODE_CLASS] = "vcs", + [VIDEO_ENHANCEMENT_CLASS] = "vecs", + }; + + if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class]) + return "xxx"; + + return uabi_names[class]; +} + +struct legacy_ring { + struct intel_gt *gt; + u8 class; + u8 instance; +}; + +static int legacy_ring_idx(const struct legacy_ring *ring) +{ + static const struct { + u8 base, max; + } map[] = { + [RENDER_CLASS] = { RCS0, 1 }, + [COPY_ENGINE_CLASS] = { BCS0, 1 }, + [VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS }, + [VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS }, + }; + + if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map))) + return INVALID_ENGINE; + + if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max)) + return INVALID_ENGINE; + + return map[ring->class].base + ring->instance; +} + +static void add_legacy_ring(struct legacy_ring *ring, + struct intel_engine_cs *engine) +{ + if (engine->gt != ring->gt || engine->class != ring->class) { + ring->gt = engine->gt; + ring->class = engine->class; + ring->instance = 0; + } + + engine->legacy_idx = legacy_ring_idx(ring); + if (engine->legacy_idx != INVALID_ENGINE) + ring->instance++; +} + +void intel_engines_driver_register(struct drm_i915_private *i915) +{ + struct legacy_ring ring = {}; + u8 uabi_instances[4] = {}; + struct list_head *it, 
*next; + struct rb_node **p, *prev; + LIST_HEAD(engines); + + sort_engines(i915, &engines); + + prev = NULL; + p = &i915->uabi_engines.rb_node; + list_for_each_safe(it, next, &engines) { + struct intel_engine_cs *engine = + container_of((struct rb_node *)it, typeof(*engine), + uabi_node); + char old[sizeof(engine->name)]; + + GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes)); + engine->uabi_class = uabi_classes[engine->class]; + + GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances)); + engine->uabi_instance = uabi_instances[engine->uabi_class]++; + + /* Replace the internal name with the final user facing name */ + memcpy(old, engine->name, sizeof(engine->name)); + scnprintf(engine->name, sizeof(engine->name), "%s%u", + intel_engine_class_repr(engine->class), + engine->uabi_instance); + DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name); + + rb_link_node(&engine->uabi_node, prev, p); + rb_insert_color(&engine->uabi_node, &i915->uabi_engines); + + GEM_BUG_ON(intel_engine_lookup_user(i915, + engine->uabi_class, + engine->uabi_instance) != engine); + + /* Fix up the mapping to match default execbuf::user_map[] */ + add_legacy_ring(&ring, engine); + + prev = &engine->uabi_node; + p = &prev->rb_right; + } + + if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) && + IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) { + struct intel_engine_cs *engine; + unsigned int isolation; + int class, inst; + int errors = 0; + + for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) { + for (inst = 0; inst < uabi_instances[class]; inst++) { + engine = intel_engine_lookup_user(i915, + class, inst); + if (!engine) { + pr_err("UABI engine not found for { class:%d, instance:%d }\n", + class, inst); + errors++; + continue; + } + + if (engine->uabi_class != class || + engine->uabi_instance != inst) { + pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n", + engine->name, + engine->uabi_class, + engine->uabi_instance, + class, inst); + errors++; + continue; + } + } + } + + /* + * Make sure that classes with multiple engine instances all + * share the same basic configuration. + */ + isolation = intel_engines_has_context_isolation(i915); + for_each_uabi_engine(engine, i915) { + unsigned int bit = BIT(engine->uabi_class); + unsigned int expected = engine->default_state ? 
bit : 0; + + if ((isolation & bit) != expected) { + pr_err("mismatching default context state for class %d on engine %s\n", + engine->uabi_class, engine->name); + errors++; + } + } + + if (WARN(errors, "Invalid UABI engine mapping found")) + i915->uabi_engines = RB_ROOT; + } + + set_scheduler_caps(i915); +} + +unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + unsigned int which; + + which = 0; + for_each_uabi_engine(engine, i915) + if (engine->default_state) + which |= BIT(engine->uabi_class); + + return which; +} diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.h b/drivers/gpu/drm/i915/gt/intel_engine_user.h new file mode 100644 index 000000000000..f845ea1cbfaa --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_user.h @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_ENGINE_USER_H +#define INTEL_ENGINE_USER_H + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_engine_cs; + +struct intel_engine_cs * +intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance); + +unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915); + +void intel_engine_add_user(struct intel_engine_cs *engine); +void intel_engines_driver_register(struct drm_i915_private *i915); + +const char *intel_engine_class_repr(u8 class); + +#endif /* INTEL_ENGINE_USER_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h new file mode 100644 index 000000000000..4294f146f13c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -0,0 +1,322 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright � 2003-2018 Intel Corporation + */ + +#ifndef _INTEL_GPU_COMMANDS_H_ +#define _INTEL_GPU_COMMANDS_H_ + +/* + * Target address alignments required for GPU access e.g. + * MI_STORE_DWORD_IMM. 
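+ *
+ * alignof_dword and alignof_qword below give those required
+ * alignments in bytes.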
+ */ +#define alignof_dword 4 +#define alignof_qword 8 + +/* + * Instruction field definitions used by the command parser + */ +#define INSTR_CLIENT_SHIFT 29 +#define INSTR_MI_CLIENT 0x0 +#define INSTR_BC_CLIENT 0x2 +#define INSTR_RC_CLIENT 0x3 +#define INSTR_SUBCLIENT_SHIFT 27 +#define INSTR_SUBCLIENT_MASK 0x18000000 +#define INSTR_MEDIA_SUBCLIENT 0x2 +#define INSTR_26_TO_24_MASK 0x7000000 +#define INSTR_26_TO_24_SHIFT 24 + +/* + * Memory interface instructions used by the kernel + */ +#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) +/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */ +#define MI_GLOBAL_GTT (1<<22) + +#define MI_NOOP MI_INSTR(0, 0) +#define MI_USER_INTERRUPT MI_INSTR(0x02, 0) +#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) +#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16) +#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) +#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) +#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) +#define MI_FLUSH MI_INSTR(0x04, 0) +#define MI_READ_FLUSH (1 << 0) +#define MI_EXE_FLUSH (1 << 1) +#define MI_NO_WRITE_FLUSH (1 << 2) +#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ +#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ +#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ +#define MI_REPORT_HEAD MI_INSTR(0x07, 0) +#define MI_ARB_ON_OFF MI_INSTR(0x08, 0) +#define MI_ARB_ENABLE (1<<0) +#define MI_ARB_DISABLE (0<<0) +#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) +#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) +#define MI_SUSPEND_FLUSH_EN (1<<0) +#define MI_SET_APPID MI_INSTR(0x0e, 0) +#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) +#define MI_OVERLAY_CONTINUE (0x0<<21) +#define MI_OVERLAY_ON (0x1<<21) +#define MI_OVERLAY_OFF (0x2<<21) +#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) +#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) +#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) +#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) +/* IVB has funny definitions for which plane to flip. 
*/ +#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19) +#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) +#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) +/* SKL ones */ +#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8) +#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */ +#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) +#define MI_SEMAPHORE_UPDATE (1<<21) +#define MI_SEMAPHORE_COMPARE (1<<20) +#define MI_SEMAPHORE_REGISTER (1<<18) +#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */ +#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */ +#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */ +#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */ +#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */ +#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */ +#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */ +#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */ +#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */ +#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */ +#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */ +#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */ +#define MI_SEMAPHORE_SYNC_INVALID (3<<16) +#define MI_SEMAPHORE_SYNC_MASK (3<<16) +#define MI_SET_CONTEXT MI_INSTR(0x18, 0) +#define MI_MM_SPACE_GTT (1<<8) +#define MI_MM_SPACE_PHYSICAL (0<<8) +#define MI_SAVE_EXT_STATE_EN (1<<3) +#define MI_RESTORE_EXT_STATE_EN (1<<2) +#define MI_FORCE_RESTORE (1<<1) +#define MI_RESTORE_INHIBIT (1<<0) +#define HSW_MI_RS_SAVE_STATE_EN (1<<3) +#define HSW_MI_RS_RESTORE_STATE_EN (1<<2) +#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */ +#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) +#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ +#define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */ +#define MI_SEMAPHORE_POLL (1 << 15) +#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12) +#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12) +#define MI_SEMAPHORE_SAD_LT_SDD (2 << 12) +#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12) +#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12) +#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12) +#define MI_SEMAPHORE_TOKEN_MASK REG_GENMASK(9, 5) +#define MI_SEMAPHORE_TOKEN_SHIFT 5 +#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) +#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2) +#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */ +#define MI_USE_GGTT (1 << 22) /* g4x+ */ +#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) +/* + * Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: + * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw + * simply ignores the register load under certain conditions. + * - One can actually load arbitrary many arbitrary registers: Simply issue x + * address/value pairs. Don't overdue it, though, x <= 2^4 must hold! + */ +#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1) +/* Gen11+. 
addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */ +#define MI_LRI_CS_MMIO (1<<19) +#define MI_LRI_FORCE_POSTED (1<<12) +#define MI_LOAD_REGISTER_IMM_MAX_REGS (126) +#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1) +#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2) +#define MI_SRM_LRM_GLOBAL_GTT (1<<22) +#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ +#define MI_FLUSH_DW_STORE_INDEX (1<<21) +#define MI_INVALIDATE_TLB (1<<18) +#define MI_FLUSH_DW_OP_STOREDW (1<<14) +#define MI_FLUSH_DW_OP_MASK (3<<14) +#define MI_FLUSH_DW_NOTIFY (1<<8) +#define MI_INVALIDATE_BSD (1<<7) +#define MI_FLUSH_DW_USE_GTT (1<<2) +#define MI_FLUSH_DW_USE_PPGTT (0<<2) +#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1) +#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2) +#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1) +#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) +#define MI_BATCH_NON_SECURE (1) +/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ +#define MI_BATCH_NON_SECURE_I965 (1<<8) +#define MI_BATCH_PPGTT_HSW (1<<8) +#define MI_BATCH_NON_SECURE_HSW (1<<13) +#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) +#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ +#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) +#define MI_BATCH_RESOURCE_STREAMER REG_BIT(10) +#define MI_BATCH_PREDICATE REG_BIT(15) /* HSW+ on RCS only*/ + +/* + * 3D instructions used by the kernel + */ +#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) + +#define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4) +#define GEN9_MEDIA_POOL_ENABLE (1 << 31) +#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) +#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) +#define SC_UPDATE_SCISSOR (0x1<<1) +#define SC_ENABLE_MASK (0x1<<0) +#define SC_ENABLE (0x1<<0) +#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) +#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) +#define SCI_YMIN_MASK (0xffff<<16) +#define SCI_XMIN_MASK (0xffff<<0) +#define SCI_YMAX_MASK (0xffff<<16) +#define SCI_XMAX_MASK (0xffff<<0) +#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) +#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) +#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) +#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) +#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) +#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) +#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) +#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) +#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) + +#define COLOR_BLT_CMD (2 << 29 | 0x40 << 22 | (5 - 2)) +#define XY_COLOR_BLT_CMD (2 << 29 | 0x50 << 22) +#define SRC_COPY_BLT_CMD (2 << 29 | 0x43 << 22) +#define GEN9_XY_FAST_COPY_BLT_CMD (2 << 29 | 0x42 << 22) +#define XY_SRC_COPY_BLT_CMD (2 << 29 | 0x53 << 22) +#define XY_MONO_SRC_COPY_IMM_BLT (2 << 29 | 0x71 << 22 | 5) +#define BLT_WRITE_A (2<<20) +#define BLT_WRITE_RGB (1<<20) +#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A) +#define BLT_DEPTH_8 (0<<24) +#define BLT_DEPTH_16_565 (1<<24) +#define BLT_DEPTH_16_1555 (2<<24) +#define BLT_DEPTH_32 (3<<24) +#define BLT_ROP_SRC_COPY (0xcc<<16) +#define BLT_ROP_COLOR_COPY (0xf0<<16) +#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ +#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ +#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) +#define ASYNC_FLIP (1<<22) +#define DISPLAY_PLANE_A (0<<20) 
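As a quick illustration of how these MI opcodes compose, the sketch below emits the MI_LOAD_REGISTER_IMM sequence described in the comment further up: an MI_NOOP guard first, then the LRI header followed by its register offset/value pairs. The helper name, register offsets and values are placeholders chosen for the example, not definitions from this header.

static u32 *emit_lri_sketch(u32 *cs)
{
	/* hw may ignore the register load without a preceding MI_NOOP */
	*cs++ = MI_NOOP;
	/* header length field covers two address/value pairs */
	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = 0x2030;	/* placeholder register offset */
	*cs++ = 0x1;	/* value to write */
	*cs++ = 0x2034;	/* placeholder register offset */
	*cs++ = 0x2;	/* value to write */
	return cs;
}

In the driver proper, dwords like these are written into ring or batch buffer space obtained from the command-stream emission helpers rather than into a bare array.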
+#define DISPLAY_PLANE_B (1<<20) +#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2)) +#define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29) /* gen11+ */ +#define PIPE_CONTROL_TILE_CACHE_FLUSH (1<<28) /* gen11+ */ +#define PIPE_CONTROL_FLUSH_L3 (1<<27) +#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ +#define PIPE_CONTROL_MMIO_WRITE (1<<23) +#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21) +#define PIPE_CONTROL_CS_STALL (1<<20) +#define PIPE_CONTROL_TLB_INVALIDATE (1<<18) +#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16) +#define PIPE_CONTROL_WRITE_TIMESTAMP (3<<14) +#define PIPE_CONTROL_QW_WRITE (1<<14) +#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14) +#define PIPE_CONTROL_DEPTH_STALL (1<<13) +#define PIPE_CONTROL_WRITE_FLUSH (1<<12) +#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ +#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */ +#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ +#define PIPE_CONTROL_L3_RO_CACHE_INVALIDATE REG_BIT(10) /* gen12 */ +#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) +#define PIPE_CONTROL_HDC_PIPELINE_FLUSH REG_BIT(9) /* gen12 */ +#define PIPE_CONTROL_NOTIFY (1<<8) +#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */ +#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5) +#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) +#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) +#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) +#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1) +#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0) +#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ + +#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1) +#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2)) +/* Opcodes for MI_MATH_INSTR */ +#define MI_MATH_NOOP MI_MATH_INSTR(0x000, 0x0, 0x0) +#define MI_MATH_LOAD(op1, op2) MI_MATH_INSTR(0x080, op1, op2) +#define MI_MATH_LOADINV(op1, op2) MI_MATH_INSTR(0x480, op1, op2) +#define MI_MATH_LOAD0(op1) MI_MATH_INSTR(0x081, op1) +#define MI_MATH_LOAD1(op1) MI_MATH_INSTR(0x481, op1) +#define MI_MATH_ADD MI_MATH_INSTR(0x100, 0x0, 0x0) +#define MI_MATH_SUB MI_MATH_INSTR(0x101, 0x0, 0x0) +#define MI_MATH_AND MI_MATH_INSTR(0x102, 0x0, 0x0) +#define MI_MATH_OR MI_MATH_INSTR(0x103, 0x0, 0x0) +#define MI_MATH_XOR MI_MATH_INSTR(0x104, 0x0, 0x0) +#define MI_MATH_STORE(op1, op2) MI_MATH_INSTR(0x180, op1, op2) +#define MI_MATH_STOREINV(op1, op2) MI_MATH_INSTR(0x580, op1, op2) +/* Registers used as operands in MI_MATH_INSTR */ +#define MI_MATH_REG(x) (x) +#define MI_MATH_REG_SRCA 0x20 +#define MI_MATH_REG_SRCB 0x21 +#define MI_MATH_REG_ACCU 0x31 +#define MI_MATH_REG_ZF 0x32 +#define MI_MATH_REG_CF 0x33 + +/* + * Commands used only by the command parser + */ +#define MI_SET_PREDICATE MI_INSTR(0x01, 0) +#define MI_ARB_CHECK MI_INSTR(0x05, 0) +#define MI_RS_CONTROL MI_INSTR(0x06, 0) +#define MI_URB_ATOMIC_ALLOC MI_INSTR(0x09, 0) +#define MI_PREDICATE MI_INSTR(0x0C, 0) +#define MI_RS_CONTEXT MI_INSTR(0x0F, 0) +#define MI_TOPOLOGY_FILTER MI_INSTR(0x0D, 0) +#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0) +#define MI_URB_CLEAR MI_INSTR(0x19, 0) +#define MI_UPDATE_GTT MI_INSTR(0x23, 0) +#define MI_CLFLUSH MI_INSTR(0x27, 0) +#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0) +#define MI_REPORT_PERF_COUNT_GGTT (1<<0) +#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0) +#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0) +#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0) +#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0) + +#define PIPELINE_SELECT 
((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16)) +#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16)) +#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16)) +#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18) +#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16)) +#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16)) +#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16)) +#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16)) +#define GFX_OP_3DSTATE_SO_DECL_LIST \ + ((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16)) + +#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16)) +#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16)) +#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16)) +#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16)) +#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16)) + +#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16)) + +#define COLOR_BLT ((0x2<<29)|(0x40<<22)) +#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22)) + +#endif /* _INTEL_GPU_COMMANDS_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c new file mode 100644 index 000000000000..4c26daf7ee46 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -0,0 +1,400 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_gt.h" +#include "intel_gt_pm.h" +#include "intel_gt_requests.h" +#include "intel_mocs.h" +#include "intel_rc6.h" +#include "intel_rps.h" +#include "intel_uncore.h" +#include "intel_pm.h" + +void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) +{ + gt->i915 = i915; + gt->uncore = &i915->uncore; + + spin_lock_init(>->irq_lock); + + INIT_LIST_HEAD(>->closed_vma); + spin_lock_init(>->closed_lock); + + intel_gt_init_reset(gt); + intel_gt_init_requests(gt); + intel_gt_pm_init_early(gt); + + intel_rps_init_early(>->rps); + intel_uc_init_early(>->uc); +} + +void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt) +{ + gt->ggtt = ggtt; + + intel_gt_sanitize(gt, false); +} + +static void init_unused_ring(struct intel_gt *gt, u32 base) +{ + struct intel_uncore *uncore = gt->uncore; + + intel_uncore_write(uncore, RING_CTL(base), 0); + intel_uncore_write(uncore, RING_HEAD(base), 0); + intel_uncore_write(uncore, RING_TAIL(base), 0); + intel_uncore_write(uncore, RING_START(base), 0); +} + +static void init_unused_rings(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + + if (IS_I830(i915)) { + init_unused_ring(gt, PRB1_BASE); + init_unused_ring(gt, SRB0_BASE); + init_unused_ring(gt, SRB1_BASE); + init_unused_ring(gt, SRB2_BASE); + init_unused_ring(gt, SRB3_BASE); + } else if (IS_GEN(i915, 2)) { + init_unused_ring(gt, SRB0_BASE); + init_unused_ring(gt, SRB1_BASE); + } else if (IS_GEN(i915, 3)) { + init_unused_ring(gt, PRB1_BASE); + init_unused_ring(gt, PRB2_BASE); + } +} + +int intel_gt_init_hw(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + int ret; + + BUG_ON(!i915->kernel_context); + ret = intel_gt_terminally_wedged(gt); + if (ret) + return ret; + + gt->last_init_time = ktime_get(); + + /* Double layer security blanket, see i915_gem_init() */ + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + if 
(HAS_EDRAM(i915) && INTEL_GEN(i915) < 9) + intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf)); + + if (IS_HASWELL(i915)) + intel_uncore_write(uncore, + MI_PREDICATE_RESULT_2, + IS_HSW_GT3(i915) ? + LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); + + /* Apply the GT workarounds... */ + intel_gt_apply_workarounds(gt); + /* ...and determine whether they are sticking. */ + intel_gt_verify_workarounds(gt, "init"); + + intel_gt_init_swizzling(gt); + + /* + * At least 830 can leave some of the unused rings + * "active" (ie. head != tail) after resume which + * will prevent c3 entry. Makes sure all unused rings + * are totally idle. + */ + init_unused_rings(gt); + + ret = i915_ppgtt_init_hw(gt); + if (ret) { + DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); + goto out; + } + + /* We can't enable contexts until all firmware is loaded */ + ret = intel_uc_init_hw(>->uc); + if (ret) { + i915_probe_error(i915, "Enabling uc failed (%d)\n", ret); + goto out; + } + + intel_mocs_init(gt); + +out: + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + return ret; +} + +static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set) +{ + intel_uncore_rmw(uncore, reg, 0, set); +} + +static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr) +{ + intel_uncore_rmw(uncore, reg, clr, 0); +} + +static void clear_register(struct intel_uncore *uncore, i915_reg_t reg) +{ + intel_uncore_rmw(uncore, reg, 0, 0); +} + +static void gen8_clear_engine_error_register(struct intel_engine_cs *engine) +{ + GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0); + GEN6_RING_FAULT_REG_POSTING_READ(engine); +} + +void +intel_gt_clear_error_registers(struct intel_gt *gt, + intel_engine_mask_t engine_mask) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + u32 eir; + + if (!IS_GEN(i915, 2)) + clear_register(uncore, PGTBL_ER); + + if (INTEL_GEN(i915) < 4) + clear_register(uncore, IPEIR(RENDER_RING_BASE)); + else + clear_register(uncore, IPEIR_I965); + + clear_register(uncore, EIR); + eir = intel_uncore_read(uncore, EIR); + if (eir) { + /* + * some errors might have become stuck, + * mask them. + */ + DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); + rmw_set(uncore, EMR, eir); + intel_uncore_write(uncore, GEN2_IIR, + I915_MASTER_ERROR_INTERRUPT); + } + + if (INTEL_GEN(i915) >= 12) { + rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID); + intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG); + } else if (INTEL_GEN(i915) >= 8) { + rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID); + intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG); + } else if (INTEL_GEN(i915) >= 6) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine_masked(engine, gt, engine_mask, id) + gen8_clear_engine_error_register(engine); + } +} + +static void gen6_check_faults(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 fault; + + for_each_engine(engine, gt, id) { + fault = GEN6_RING_FAULT_REG_READ(engine); + if (fault & RING_FAULT_VALID) { + DRM_DEBUG_DRIVER("Unexpected fault\n" + "\tAddr: 0x%08lx\n" + "\tAddress space: %s\n" + "\tSource ID: %d\n" + "\tType: %d\n", + fault & PAGE_MASK, + fault & RING_FAULT_GTTSEL_MASK ? 
+ "GGTT" : "PPGTT", + RING_FAULT_SRCID(fault), + RING_FAULT_FAULT_TYPE(fault)); + } + } +} + +static void gen8_check_faults(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg; + u32 fault; + + if (INTEL_GEN(gt->i915) >= 12) { + fault_reg = GEN12_RING_FAULT_REG; + fault_data0_reg = GEN12_FAULT_TLB_DATA0; + fault_data1_reg = GEN12_FAULT_TLB_DATA1; + } else { + fault_reg = GEN8_RING_FAULT_REG; + fault_data0_reg = GEN8_FAULT_TLB_DATA0; + fault_data1_reg = GEN8_FAULT_TLB_DATA1; + } + + fault = intel_uncore_read(uncore, fault_reg); + if (fault & RING_FAULT_VALID) { + u32 fault_data0, fault_data1; + u64 fault_addr; + + fault_data0 = intel_uncore_read(uncore, fault_data0_reg); + fault_data1 = intel_uncore_read(uncore, fault_data1_reg); + + fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | + ((u64)fault_data0 << 12); + + DRM_DEBUG_DRIVER("Unexpected fault\n" + "\tAddr: 0x%08x_%08x\n" + "\tAddress space: %s\n" + "\tEngine ID: %d\n" + "\tSource ID: %d\n" + "\tType: %d\n", + upper_32_bits(fault_addr), + lower_32_bits(fault_addr), + fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", + GEN8_RING_FAULT_ENGINE_ID(fault), + RING_FAULT_SRCID(fault), + RING_FAULT_FAULT_TYPE(fault)); + } +} + +void intel_gt_check_and_clear_faults(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + + /* From GEN8 onwards we only have one 'All Engine Fault Register' */ + if (INTEL_GEN(i915) >= 8) + gen8_check_faults(gt); + else if (INTEL_GEN(i915) >= 6) + gen6_check_faults(gt); + else + return; + + intel_gt_clear_error_registers(gt, ALL_ENGINES); +} + +void intel_gt_flush_ggtt_writes(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + intel_wakeref_t wakeref; + + /* + * No actual flushing is required for the GTT write domain for reads + * from the GTT domain. Writes to it "immediately" go to main memory + * as far as we know, so there's no chipset flush. It also doesn't + * land in the GPU render cache. + * + * However, we do have to enforce the order so that all writes through + * the GTT land before any writes to the device, such as updates to + * the GATT itself. + * + * We also have to wait a bit for the writes to land from the GTT. + * An uncached read (i.e. mmio) seems to be ideal for the round-trip + * timing. This issue has only been observed when switching quickly + * between GTT writes and CPU reads from inside the kernel on recent hw, + * and it appears to only affect discrete GTT blocks (i.e. on LLC + * system agents we cannot reproduce this behaviour, until Cannonlake + * that was!). 
+ */ + + wmb(); + + if (INTEL_INFO(gt->i915)->has_coherent_ggtt) + return; + + intel_gt_chipset_flush(gt); + + with_intel_runtime_pm(uncore->rpm, wakeref) { + unsigned long flags; + + spin_lock_irqsave(&uncore->lock, flags); + intel_uncore_posting_read_fw(uncore, + RING_HEAD(RENDER_RING_BASE)); + spin_unlock_irqrestore(&uncore->lock, flags); + } +} + +void intel_gt_chipset_flush(struct intel_gt *gt) +{ + wmb(); + if (INTEL_GEN(gt->i915) < 6) + intel_gtt_chipset_flush(); +} + +void intel_gt_driver_register(struct intel_gt *gt) +{ + intel_rps_driver_register(>->rps); +} + +static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) +{ + struct drm_i915_private *i915 = gt->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int ret; + + obj = i915_gem_object_create_stolen(i915, size); + if (IS_ERR(obj)) + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate scratch page\n"); + return PTR_ERR(obj); + } + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_unref; + } + + ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (ret) + goto err_unref; + + gt->scratch = i915_vma_make_unshrinkable(vma); + + return 0; + +err_unref: + i915_gem_object_put(obj); + return ret; +} + +static void intel_gt_fini_scratch(struct intel_gt *gt) +{ + i915_vma_unpin_and_release(>->scratch, 0); +} + +int intel_gt_init(struct intel_gt *gt) +{ + int err; + + err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K); + if (err) + return err; + + intel_gt_pm_init(gt); + + return 0; +} + +void intel_gt_driver_remove(struct intel_gt *gt) +{ + GEM_BUG_ON(gt->awake); +} + +void intel_gt_driver_unregister(struct intel_gt *gt) +{ + intel_rps_driver_unregister(>->rps); +} + +void intel_gt_driver_release(struct intel_gt *gt) +{ + intel_gt_pm_fini(gt); + intel_gt_fini_scratch(gt); +} + +void intel_gt_driver_late_release(struct intel_gt *gt) +{ + intel_uc_driver_late_release(>->uc); + intel_gt_fini_reset(gt); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h new file mode 100644 index 000000000000..5436f8c30708 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_GT__ +#define __INTEL_GT__ + +#include "intel_engine_types.h" +#include "intel_gt_types.h" +#include "intel_reset.h" + +struct drm_i915_private; + +static inline struct intel_gt *uc_to_gt(struct intel_uc *uc) +{ + return container_of(uc, struct intel_gt, uc); +} + +static inline struct intel_gt *guc_to_gt(struct intel_guc *guc) +{ + return container_of(guc, struct intel_gt, uc.guc); +} + +static inline struct intel_gt *huc_to_gt(struct intel_huc *huc) +{ + return container_of(huc, struct intel_gt, uc.huc); +} + +void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); +void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt); +int __must_check intel_gt_init_hw(struct intel_gt *gt); +int intel_gt_init(struct intel_gt *gt); +void intel_gt_driver_register(struct intel_gt *gt); + +void intel_gt_driver_unregister(struct intel_gt *gt); +void intel_gt_driver_remove(struct intel_gt *gt); +void intel_gt_driver_release(struct intel_gt *gt); + +void intel_gt_driver_late_release(struct intel_gt *gt); + +void intel_gt_check_and_clear_faults(struct intel_gt *gt); +void intel_gt_clear_error_registers(struct intel_gt *gt, + intel_engine_mask_t 
engine_mask); + +void intel_gt_flush_ggtt_writes(struct intel_gt *gt); +void intel_gt_chipset_flush(struct intel_gt *gt); + +static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt, + enum intel_gt_scratch_field field) +{ + return i915_ggtt_offset(gt->scratch) + field; +} + +static inline bool intel_gt_is_wedged(struct intel_gt *gt) +{ + return __intel_reset_failed(>->reset); +} + +#endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c new file mode 100644 index 000000000000..973ee7eded64 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -0,0 +1,456 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include <linux/sched/clock.h> + +#include "i915_drv.h" +#include "i915_irq.h" +#include "intel_gt.h" +#include "intel_gt_irq.h" +#include "intel_uncore.h" +#include "intel_rps.h" + +static void guc_irq_handler(struct intel_guc *guc, u16 iir) +{ + if (iir & GUC_INTR_GUC2HOST) + intel_guc_to_host_event_handler(guc); +} + +static void +cs_irq_handler(struct intel_engine_cs *engine, u32 iir) +{ + bool tasklet = false; + + if (iir & GT_CONTEXT_SWITCH_INTERRUPT) + tasklet = true; + + if (iir & GT_RENDER_USER_INTERRUPT) { + intel_engine_breadcrumbs_irq(engine); + tasklet |= intel_engine_needs_breadcrumb_tasklet(engine); + } + + if (tasklet) + tasklet_hi_schedule(&engine->execlists.tasklet); +} + +static u32 +gen11_gt_engine_identity(struct intel_gt *gt, + const unsigned int bank, const unsigned int bit) +{ + void __iomem * const regs = gt->uncore->regs; + u32 timeout_ts; + u32 ident; + + lockdep_assert_held(>->irq_lock); + + raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); + + /* + * NB: Specs do not specify how long to spin wait, + * so we do ~100us as an educated guess. 
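(For reference: local_clock() returns nanoseconds, so the >> 10 in the code below is a cheap approximation of a nanosecond-to-microsecond conversion, making timeout_ts roughly 100us in the future.)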
+ */ + timeout_ts = (local_clock() >> 10) + 100; + do { + ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); + } while (!(ident & GEN11_INTR_DATA_VALID) && + !time_after32(local_clock() >> 10, timeout_ts)); + + if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { + DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", + bank, bit, ident); + return 0; + } + + raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), + GEN11_INTR_DATA_VALID); + + return ident; +} + +static void +gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, + const u16 iir) +{ + if (instance == OTHER_GUC_INSTANCE) + return guc_irq_handler(>->uc.guc, iir); + + if (instance == OTHER_GTPM_INSTANCE) + return gen11_rps_irq_handler(>->rps, iir); + + WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", + instance, iir); +} + +static void +gen11_engine_irq_handler(struct intel_gt *gt, const u8 class, + const u8 instance, const u16 iir) +{ + struct intel_engine_cs *engine; + + if (instance <= MAX_ENGINE_INSTANCE) + engine = gt->engine_class[class][instance]; + else + engine = NULL; + + if (likely(engine)) + return cs_irq_handler(engine, iir); + + WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", + class, instance); +} + +static void +gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity) +{ + const u8 class = GEN11_INTR_ENGINE_CLASS(identity); + const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); + const u16 intr = GEN11_INTR_ENGINE_INTR(identity); + + if (unlikely(!intr)) + return; + + if (class <= COPY_ENGINE_CLASS) + return gen11_engine_irq_handler(gt, class, instance, intr); + + if (class == OTHER_CLASS) + return gen11_other_irq_handler(gt, instance, intr); + + WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", + class, instance, intr); +} + +static void +gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank) +{ + void __iomem * const regs = gt->uncore->regs; + unsigned long intr_dw; + unsigned int bit; + + lockdep_assert_held(>->irq_lock); + + intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); + + for_each_set_bit(bit, &intr_dw, 32) { + const u32 ident = gen11_gt_engine_identity(gt, bank, bit); + + gen11_gt_identity_handler(gt, ident); + } + + /* Clear must be after shared has been served for engine */ + raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); +} + +void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl) +{ + unsigned int bank; + + spin_lock(>->irq_lock); + + for (bank = 0; bank < 2; bank++) { + if (master_ctl & GEN11_GT_DW_IRQ(bank)) + gen11_gt_bank_handler(gt, bank); + } + + spin_unlock(>->irq_lock); +} + +bool gen11_gt_reset_one_iir(struct intel_gt *gt, + const unsigned int bank, const unsigned int bit) +{ + void __iomem * const regs = gt->uncore->regs; + u32 dw; + + lockdep_assert_held(>->irq_lock); + + dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); + if (dw & BIT(bit)) { + /* + * According to the BSpec, DW_IIR bits cannot be cleared without + * first servicing the Selector & Shared IIR registers. + */ + gen11_gt_engine_identity(gt, bank, bit); + + /* + * We locked GT INT DW by reading it. If we want to (try + * to) recover from this successfully, we need to clear + * our bit, otherwise we are locking the register for + * everybody. + */ + raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit)); + + return true; + } + + return false; +} + +void gen11_gt_irq_reset(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + + /* Disable RCS, BCS, VCS and VECS class engines. 
*/ + intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0); + + /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ + intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0); + + intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0); +} + +void gen11_gt_irq_postinstall(struct intel_gt *gt) +{ + const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; + struct intel_uncore *uncore = gt->uncore; + const u32 dmask = irqs << 16 | irqs; + const u32 smask = irqs << 16; + + BUILD_BUG_ON(irqs & 0xffff0000); + + /* Enable RCS, BCS, VCS and VECS class interrupts. */ + intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask); + intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask); + + /* Unmask irqs on RCS, BCS, VCS and VECS engines. */ + intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask); + intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask); + intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask); + intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask); + intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask); + + /* + * RPS interrupts will get enabled/disabled on demand when RPS itself + * is enabled/disabled. + */ + gt->pm_ier = 0x0; + gt->pm_imr = ~gt->pm_ier; + intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); + + /* Same thing for GuC interrupts */ + intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0); +} + +void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir) +{ + if (gt_iir & GT_RENDER_USER_INTERRUPT) + intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]); + if (gt_iir & ILK_BSD_USER_INTERRUPT) + intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]); +} + +static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir) +{ + if (!HAS_L3_DPF(gt->i915)) + return; + + spin_lock(>->irq_lock); + gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915)); + spin_unlock(>->irq_lock); + + if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) + gt->i915->l3_parity.which_slice |= 1 << 1; + + if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) + gt->i915->l3_parity.which_slice |= 1 << 0; + + schedule_work(>->i915->l3_parity.error_work); +} + +void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir) +{ + if (gt_iir & GT_RENDER_USER_INTERRUPT) + intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]); + if (gt_iir & GT_BSD_USER_INTERRUPT) + intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]); + if (gt_iir & GT_BLT_USER_INTERRUPT) + intel_engine_breadcrumbs_irq(gt->engine_class[COPY_ENGINE_CLASS][0]); + + if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | + GT_BSD_CS_ERROR_INTERRUPT | + GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) + DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); + + if (gt_iir & GT_PARITY_ERROR(gt->i915)) + gen7_parity_error_irq_handler(gt, gt_iir); +} + +void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 
gt_iir[4]) +{ + void __iomem * const regs = gt->uncore->regs; + + if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { + gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0)); + if (likely(gt_iir[0])) + raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); + } + + if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { + gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); + if (likely(gt_iir[1])) + raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); + } + + if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { + gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); + if (likely(gt_iir[2])) + raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]); + } + + if (master_ctl & GEN8_GT_VECS_IRQ) { + gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3)); + if (likely(gt_iir[3])) + raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]); + } +} + +void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]) +{ + if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { + cs_irq_handler(gt->engine_class[RENDER_CLASS][0], + gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); + cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0], + gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); + } + + if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { + cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0], + gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT); + cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1], + gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); + } + + if (master_ctl & GEN8_GT_VECS_IRQ) { + cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0], + gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); + } + + if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { + gen6_rps_irq_handler(>->rps, gt_iir[2]); + guc_irq_handler(>->uc.guc, gt_iir[2] >> 16); + } +} + +void gen8_gt_irq_reset(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + + GEN8_IRQ_RESET_NDX(uncore, GT, 0); + GEN8_IRQ_RESET_NDX(uncore, GT, 1); + GEN8_IRQ_RESET_NDX(uncore, GT, 2); + GEN8_IRQ_RESET_NDX(uncore, GT, 3); +} + +void gen8_gt_irq_postinstall(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + + /* These are interrupts we'll toggle with the ring mask register */ + u32 gt_interrupts[] = { + (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | + GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT), + + (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT | + GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT), + + 0, + + (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT) + }; + + gt->pm_ier = 0x0; + gt->pm_imr = ~gt->pm_ier; + GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]); + GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]); + /* + * RPS interrupts will get enabled/disabled on demand when RPS itself + * is enabled/disabled. Same wil be the case for GuC interrupts. 
+ */ + GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier); + GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]); +} + +static void gen5_gt_update_irq(struct intel_gt *gt, + u32 interrupt_mask, + u32 enabled_irq_mask) +{ + lockdep_assert_held(>->irq_lock); + + GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask); + + gt->gt_imr &= ~interrupt_mask; + gt->gt_imr |= (~enabled_irq_mask & interrupt_mask); + intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr); +} + +void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask) +{ + gen5_gt_update_irq(gt, mask, mask); + intel_uncore_posting_read_fw(gt->uncore, GTIMR); +} + +void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask) +{ + gen5_gt_update_irq(gt, mask, 0); +} + +void gen5_gt_irq_reset(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + + GEN3_IRQ_RESET(uncore, GT); + if (INTEL_GEN(gt->i915) >= 6) + GEN3_IRQ_RESET(uncore, GEN6_PM); +} + +void gen5_gt_irq_postinstall(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + u32 pm_irqs = 0; + u32 gt_irqs = 0; + + gt->gt_imr = ~0; + if (HAS_L3_DPF(gt->i915)) { + /* L3 parity interrupt is always unmasked. */ + gt->gt_imr = ~GT_PARITY_ERROR(gt->i915); + gt_irqs |= GT_PARITY_ERROR(gt->i915); + } + + gt_irqs |= GT_RENDER_USER_INTERRUPT; + if (IS_GEN(gt->i915, 5)) + gt_irqs |= ILK_BSD_USER_INTERRUPT; + else + gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; + + GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs); + + if (INTEL_GEN(gt->i915) >= 6) { + /* + * RPS interrupts will get enabled/disabled on demand when RPS + * itself is enabled/disabled. + */ + if (HAS_ENGINE(gt->i915, VECS0)) { + pm_irqs |= PM_VEBOX_USER_INTERRUPT; + gt->pm_ier |= PM_VEBOX_USER_INTERRUPT; + } + + gt->pm_imr = 0xffffffff; + GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs); + } +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h new file mode 100644 index 000000000000..8f37593712c9 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_GT_IRQ_H +#define INTEL_GT_IRQ_H + +#include <linux/types.h> + +struct intel_gt; + +#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ + GEN8_GT_BCS_IRQ | \ + GEN8_GT_VCS0_IRQ | \ + GEN8_GT_VCS1_IRQ | \ + GEN8_GT_VECS_IRQ | \ + GEN8_GT_PM_IRQ | \ + GEN8_GT_GUC_IRQ) + +void gen11_gt_irq_reset(struct intel_gt *gt); +void gen11_gt_irq_postinstall(struct intel_gt *gt); +void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl); + +bool gen11_gt_reset_one_iir(struct intel_gt *gt, + const unsigned int bank, + const unsigned int bit); + +void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir); + +void gen5_gt_irq_postinstall(struct intel_gt *gt); +void gen5_gt_irq_reset(struct intel_gt *gt); +void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask); +void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask); + +void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir); + +void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]); +void gen8_gt_irq_reset(struct intel_gt *gt); +void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]); +void gen8_gt_irq_postinstall(struct intel_gt *gt); + +#endif /* INTEL_GT_IRQ_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c new file mode 100644 index 000000000000..6187cdd06646 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -0,0 +1,331 @@ +/* + * 
SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include <linux/suspend.h> + +#include "i915_drv.h" +#include "i915_globals.h" +#include "i915_params.h" +#include "intel_context.h" +#include "intel_engine_pm.h" +#include "intel_gt.h" +#include "intel_gt_pm.h" +#include "intel_gt_requests.h" +#include "intel_llc.h" +#include "intel_pm.h" +#include "intel_rc6.h" +#include "intel_rps.h" +#include "intel_wakeref.h" + +static void user_forcewake(struct intel_gt *gt, bool suspend) +{ + int count = atomic_read(>->user_wakeref); + + /* Inside suspend/resume so single threaded, no races to worry about. */ + if (likely(!count)) + return; + + intel_gt_pm_get(gt); + if (suspend) { + GEM_BUG_ON(count > atomic_read(>->wakeref.count)); + atomic_sub(count, >->wakeref.count); + } else { + atomic_add(count, >->wakeref.count); + } + intel_gt_pm_put(gt); +} + +static int __gt_unpark(struct intel_wakeref *wf) +{ + struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref); + struct drm_i915_private *i915 = gt->i915; + + GEM_TRACE("\n"); + + i915_globals_unpark(); + + /* + * It seems that the DMC likes to transition between the DC states a lot + * when there are no connected displays (no active power domains) during + * command submission. + * + * This activity has negative impact on the performance of the chip with + * huge latencies observed in the interrupt handler and elsewhere. + * + * Work around it by grabbing a GT IRQ power domain whilst there is any + * GT activity, preventing any DC state transitions. + */ + gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); + GEM_BUG_ON(!gt->awake); + + if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); + + intel_rps_unpark(>->rps); + i915_pmu_gt_unparked(i915); + + intel_gt_unpark_requests(gt); + + return 0; +} + +static int __gt_park(struct intel_wakeref *wf) +{ + struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref); + intel_wakeref_t wakeref = fetch_and_zero(>->awake); + struct drm_i915_private *i915 = gt->i915; + + GEM_TRACE("\n"); + + intel_gt_park_requests(gt); + + i915_vma_parked(gt); + i915_pmu_gt_parked(i915); + intel_rps_park(>->rps); + + /* Everything switched off, flush any residual interrupt just in case */ + intel_synchronize_irq(i915); + + if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) { + intel_rc6_ctx_wa_check(&i915->gt.rc6); + intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); + } + + GEM_BUG_ON(!wakeref); + intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref); + + i915_globals_park(); + + return 0; +} + +static const struct intel_wakeref_ops wf_ops = { + .get = __gt_unpark, + .put = __gt_park, + .flags = INTEL_WAKEREF_PUT_ASYNC, +}; + +void intel_gt_pm_init_early(struct intel_gt *gt) +{ + intel_wakeref_init(>->wakeref, gt->uncore->rpm, &wf_ops); +} + +void intel_gt_pm_init(struct intel_gt *gt) +{ + /* + * Enabling power-management should be "self-healing". If we cannot + * enable a feature, simply leave it disabled with a notice to the + * user. 
+ */ + intel_rc6_init(>->rc6); + intel_rps_init(>->rps); +} + +static bool reset_engines(struct intel_gt *gt) +{ + if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + return false; + + return __intel_gt_reset(gt, ALL_ENGINES) == 0; +} + +/** + * intel_gt_sanitize: called after the GPU has lost power + * @gt: the i915 GT container + * @force: ignore a failed reset and sanitize engine state anyway + * + * Anytime we reset the GPU, either with an explicit GPU reset or through a + * PCI power cycle, the GPU loses state and we must reset our state tracking + * to match. Note that calling intel_gt_sanitize() if the GPU has not + * been reset results in much confusion! + */ +void intel_gt_sanitize(struct intel_gt *gt, bool force) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + + GEM_TRACE("force:%s\n", yesno(force)); + + /* Use a raw wakeref to avoid calling intel_display_power_get early */ + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + + /* + * As we have just resumed the machine and woken the device up from + * deep PCI sleep (presumably D3_cold), assume the HW has been reset + * back to defaults, recovering from whatever wedged state we left it + * in and so worth trying to use the device once more. + */ + if (intel_gt_is_wedged(gt)) + intel_gt_unset_wedged(gt); + + intel_uc_sanitize(>->uc); + + for_each_engine(engine, gt, id) + if (engine->reset.prepare) + engine->reset.prepare(engine); + + intel_uc_reset_prepare(>->uc); + + if (reset_engines(gt) || force) { + for_each_engine(engine, gt, id) + __intel_engine_reset(engine, false); + } + + for_each_engine(engine, gt, id) + if (engine->reset.finish) + engine->reset.finish(engine); + + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); +} + +void intel_gt_pm_fini(struct intel_gt *gt) +{ + intel_rc6_fini(>->rc6); +} + +int intel_gt_resume(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + GEM_TRACE("\n"); + + /* + * After resume, we may need to poke into the pinned kernel + * contexts to paper over any damage caused by the sudden suspend. + * Only the kernel contexts should remain pinned over suspend, + * allowing us to fixup the user contexts on their first pin. + */ + intel_gt_pm_get(gt); + + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + intel_rc6_sanitize(>->rc6); + + intel_rps_enable(>->rps); + intel_llc_enable(>->llc); + + for_each_engine(engine, gt, id) { + struct intel_context *ce; + + intel_engine_pm_get(engine); + + ce = engine->kernel_context; + if (ce) { + GEM_BUG_ON(!intel_context_is_pinned(ce)); + ce->ops->reset(ce); + } + + engine->serial++; /* kernel context lost */ + err = engine->resume(engine); + + intel_engine_pm_put(engine); + if (err) { + dev_err(gt->i915->drm.dev, + "Failed to restart %s (%d)\n", + engine->name, err); + break; + } + } + + intel_rc6_enable(>->rc6); + + intel_uc_resume(>->uc); + + user_forcewake(gt, false); + + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); + intel_gt_pm_put(gt); + + return err; +} + +static void wait_for_suspend(struct intel_gt *gt) +{ + if (!intel_gt_pm_is_awake(gt)) + return; + + if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) { + /* + * Forcibly cancel outstanding work and leave + * the gpu quiet. 
+ */ + intel_gt_set_wedged(gt); + } + + intel_gt_pm_wait_for_idle(gt); +} + +void intel_gt_suspend_prepare(struct intel_gt *gt) +{ + user_forcewake(gt, true); + wait_for_suspend(gt); + + intel_uc_suspend(>->uc); +} + +static suspend_state_t pm_suspend_target(void) +{ +#if IS_ENABLED(CONFIG_PM_SLEEP) + return pm_suspend_target_state; +#else + return PM_SUSPEND_TO_IDLE; +#endif +} + +void intel_gt_suspend_late(struct intel_gt *gt) +{ + intel_wakeref_t wakeref; + + /* We expect to be idle already; but also want to be independent */ + wait_for_suspend(gt); + + /* + * On disabling the device, we want to turn off HW access to memory + * that we no longer own. + * + * However, not all suspend-states disable the device. S0 (s2idle) + * is effectively runtime-suspend, the device is left powered on + * but needs to be put into a low power state. We need to keep + * powermanagement enabled, but we also retain system state and so + * it remains safe to keep on using our allocated memory. + */ + if (pm_suspend_target() == PM_SUSPEND_TO_IDLE) + return; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) { + intel_rps_disable(>->rps); + intel_rc6_disable(>->rc6); + intel_llc_disable(>->llc); + } + + intel_gt_sanitize(gt, false); + + GEM_TRACE("\n"); +} + +void intel_gt_runtime_suspend(struct intel_gt *gt) +{ + intel_uc_runtime_suspend(>->uc); + + GEM_TRACE("\n"); +} + +int intel_gt_runtime_resume(struct intel_gt *gt) +{ + GEM_TRACE("\n"); + + intel_gt_init_swizzling(gt); + + return intel_uc_runtime_resume(>->uc); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_gt_pm.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h new file mode 100644 index 000000000000..b3e17399be9b --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_GT_PM_H +#define INTEL_GT_PM_H + +#include <linux/types.h> + +#include "intel_gt_types.h" +#include "intel_wakeref.h" + +static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt) +{ + return intel_wakeref_is_active(>->wakeref); +} + +static inline void intel_gt_pm_get(struct intel_gt *gt) +{ + intel_wakeref_get(>->wakeref); +} + +static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt) +{ + return intel_wakeref_get_if_active(>->wakeref); +} + +static inline void intel_gt_pm_put(struct intel_gt *gt) +{ + intel_wakeref_put(>->wakeref); +} + +static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt) +{ + return intel_wakeref_wait_for_idle(>->wakeref); +} + +void intel_gt_pm_init_early(struct intel_gt *gt); +void intel_gt_pm_init(struct intel_gt *gt); +void intel_gt_pm_fini(struct intel_gt *gt); + +void intel_gt_sanitize(struct intel_gt *gt, bool force); + +void intel_gt_suspend_prepare(struct intel_gt *gt); +void intel_gt_suspend_late(struct intel_gt *gt); +int intel_gt_resume(struct intel_gt *gt); + +void intel_gt_runtime_suspend(struct intel_gt *gt); +int intel_gt_runtime_resume(struct intel_gt *gt); + +static inline bool is_mock_gt(const struct intel_gt *gt) +{ + return I915_SELFTEST_ONLY(gt->awake == -ENODEV); +} + +#endif /* INTEL_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c new file mode 100644 index 000000000000..babe866126d7 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c @@ -0,0 +1,109 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include 
"i915_drv.h" +#include "i915_reg.h" +#include "intel_gt.h" +#include "intel_gt_irq.h" +#include "intel_gt_pm_irq.h" + +static void write_pm_imr(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + u32 mask = gt->pm_imr; + i915_reg_t reg; + + if (INTEL_GEN(i915) >= 11) { + reg = GEN11_GPM_WGBOXPERF_INTR_MASK; + mask <<= 16; /* pm is in upper half */ + } else if (INTEL_GEN(i915) >= 8) { + reg = GEN8_GT_IMR(2); + } else { + reg = GEN6_PMIMR; + } + + intel_uncore_write(uncore, reg, mask); +} + +static void gen6_gt_pm_update_irq(struct intel_gt *gt, + u32 interrupt_mask, + u32 enabled_irq_mask) +{ + u32 new_val; + + WARN_ON(enabled_irq_mask & ~interrupt_mask); + + lockdep_assert_held(>->irq_lock); + + new_val = gt->pm_imr; + new_val &= ~interrupt_mask; + new_val |= ~enabled_irq_mask & interrupt_mask; + + if (new_val != gt->pm_imr) { + gt->pm_imr = new_val; + write_pm_imr(gt); + } +} + +void gen6_gt_pm_unmask_irq(struct intel_gt *gt, u32 mask) +{ + gen6_gt_pm_update_irq(gt, mask, mask); +} + +void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask) +{ + gen6_gt_pm_update_irq(gt, mask, 0); +} + +void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask) +{ + struct intel_uncore *uncore = gt->uncore; + i915_reg_t reg = INTEL_GEN(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; + + lockdep_assert_held(>->irq_lock); + + intel_uncore_write(uncore, reg, reset_mask); + intel_uncore_write(uncore, reg, reset_mask); + intel_uncore_posting_read(uncore, reg); +} + +static void write_pm_ier(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + u32 mask = gt->pm_ier; + i915_reg_t reg; + + if (INTEL_GEN(i915) >= 11) { + reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE; + mask <<= 16; /* pm is in upper half */ + } else if (INTEL_GEN(i915) >= 8) { + reg = GEN8_GT_IER(2); + } else { + reg = GEN6_PMIER; + } + + intel_uncore_write(uncore, reg, mask); +} + +void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask) +{ + lockdep_assert_held(>->irq_lock); + + gt->pm_ier |= enable_mask; + write_pm_ier(gt); + gen6_gt_pm_unmask_irq(gt, enable_mask); +} + +void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask) +{ + lockdep_assert_held(>->irq_lock); + + gt->pm_ier &= ~disable_mask; + gen6_gt_pm_mask_irq(gt, disable_mask); + write_pm_ier(gt); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h new file mode 100644 index 000000000000..b29816a04809 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_GT_PM_IRQ_H +#define INTEL_GT_PM_IRQ_H + +#include <linux/types.h> + +struct intel_gt; + +void gen6_gt_pm_unmask_irq(struct intel_gt *gt, u32 mask); +void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask); + +void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask); +void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask); + +void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask); + +#endif /* INTEL_GT_PM_IRQ_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c new file mode 100644 index 000000000000..353809ac2754 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -0,0 +1,137 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" /* for_each_engine() */ +#include 
"i915_request.h" +#include "intel_gt.h" +#include "intel_gt_pm.h" +#include "intel_gt_requests.h" +#include "intel_timeline.h" + +static void retire_requests(struct intel_timeline *tl) +{ + struct i915_request *rq, *rn; + + list_for_each_entry_safe(rq, rn, &tl->requests, link) + if (!i915_request_retire(rq)) + break; +} + +static void flush_submission(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) + intel_engine_flush_submission(engine); +} + +long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) +{ + struct intel_gt_timelines *timelines = >->timelines; + struct intel_timeline *tl, *tn; + unsigned long active_count = 0; + unsigned long flags; + bool interruptible; + LIST_HEAD(free); + + interruptible = true; + if (unlikely(timeout < 0)) + timeout = -timeout, interruptible = false; + + flush_submission(gt); /* kick the ksoftirqd tasklets */ + + spin_lock_irqsave(&timelines->lock, flags); + list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { + if (!mutex_trylock(&tl->mutex)) { + active_count++; /* report busy to caller, try again? */ + continue; + } + + intel_timeline_get(tl); + GEM_BUG_ON(!tl->active_count); + tl->active_count++; /* pin the list element */ + spin_unlock_irqrestore(&timelines->lock, flags); + + if (timeout > 0) { + struct dma_fence *fence; + + fence = i915_active_fence_get(&tl->last_request); + if (fence) { + timeout = dma_fence_wait_timeout(fence, + interruptible, + timeout); + dma_fence_put(fence); + } + } + + retire_requests(tl); + + spin_lock_irqsave(&timelines->lock, flags); + + /* Resume iteration after dropping lock */ + list_safe_reset_next(tl, tn, link); + if (!--tl->active_count) + list_del(&tl->link); + else + active_count += !!rcu_access_pointer(tl->last_request.fence); + + mutex_unlock(&tl->mutex); + + /* Defer the final release to after the spinlock */ + if (refcount_dec_and_test(&tl->kref.refcount)) { + GEM_BUG_ON(tl->active_count); + list_add(&tl->link, &free); + } + } + spin_unlock_irqrestore(&timelines->lock, flags); + + list_for_each_entry_safe(tl, tn, &free, link) + __intel_timeline_free(&tl->kref); + + return active_count ? 
timeout : 0; +} + +int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout) +{ + /* If the device is asleep, we have no requests outstanding */ + if (!intel_gt_pm_is_awake(gt)) + return 0; + + while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) { + cond_resched(); + if (signal_pending(current)) + return -EINTR; + } + + return timeout; +} + +static void retire_work_handler(struct work_struct *work) +{ + struct intel_gt *gt = + container_of(work, typeof(*gt), requests.retire_work.work); + + intel_gt_retire_requests(gt); + schedule_delayed_work(>->requests.retire_work, + round_jiffies_up_relative(HZ)); +} + +void intel_gt_init_requests(struct intel_gt *gt) +{ + INIT_DELAYED_WORK(>->requests.retire_work, retire_work_handler); +} + +void intel_gt_park_requests(struct intel_gt *gt) +{ + cancel_delayed_work(>->requests.retire_work); +} + +void intel_gt_unpark_requests(struct intel_gt *gt) +{ + schedule_delayed_work(>->requests.retire_work, + round_jiffies_up_relative(HZ)); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h new file mode 100644 index 000000000000..bd31cbce47e0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_GT_REQUESTS_H +#define INTEL_GT_REQUESTS_H + +struct intel_gt; + +long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout); +static inline void intel_gt_retire_requests(struct intel_gt *gt) +{ + intel_gt_retire_requests_timeout(gt, 0); +} + +int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout); + +void intel_gt_init_requests(struct intel_gt *gt); +void intel_gt_park_requests(struct intel_gt *gt); +void intel_gt_unpark_requests(struct intel_gt *gt); + +#endif /* INTEL_GT_REQUESTS_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h new file mode 100644 index 000000000000..d4e14dbd172e --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_GT_TYPES__ +#define __INTEL_GT_TYPES__ + +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/notifier.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include "uc/intel_uc.h" + +#include "i915_vma.h" +#include "intel_engine_types.h" +#include "intel_llc_types.h" +#include "intel_reset_types.h" +#include "intel_rc6_types.h" +#include "intel_rps_types.h" +#include "intel_wakeref.h" + +struct drm_i915_private; +struct i915_ggtt; +struct intel_engine_cs; +struct intel_uncore; + +struct intel_gt { + struct drm_i915_private *i915; + struct intel_uncore *uncore; + struct i915_ggtt *ggtt; + + struct intel_uc uc; + + struct intel_gt_timelines { + spinlock_t lock; /* protects active_list */ + struct list_head active_list; + + /* Pack multiple timelines' seqnos into the same page */ + spinlock_t hwsp_lock; + struct list_head hwsp_free_list; + } timelines; + + struct intel_gt_requests { + /** + * We leave the user IRQ off as much as possible, + * but this means that requests will finish and never + * be retired once the system goes idle. Set a timer to + * fire periodically while the ring is running. When it + * fires, go retire requests. 
+ */ + struct delayed_work retire_work; + } requests; + + struct intel_wakeref wakeref; + atomic_t user_wakeref; + + struct list_head closed_vma; + spinlock_t closed_lock; /* guards the list of closed_vma */ + + struct intel_reset reset; + + /** + * Is the GPU currently considered idle, or busy executing + * userspace requests? Whilst idle, we allow runtime power + * management to power down the hardware and display clocks. + * In order to reduce the effect on performance, there + * is a slight delay before we do so. + */ + intel_wakeref_t awake; + + struct intel_llc llc; + struct intel_rc6 rc6; + struct intel_rps rps; + + ktime_t last_init_time; + + struct i915_vma *scratch; + + spinlock_t irq_lock; + u32 gt_imr; + u32 pm_ier; + u32 pm_imr; + + u32 pm_guc_events; + + struct intel_engine_cs *engine[I915_NUM_ENGINES]; + struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1] + [MAX_ENGINE_INSTANCE + 1]; +}; + +enum intel_gt_scratch_field { + /* 8 bytes */ + INTEL_GT_SCRATCH_FIELD_DEFAULT = 0, + + /* 8 bytes */ + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128, + + /* 8 bytes */ + INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256, + + /* 6 * 8 bytes */ + INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048, + + /* 4 bytes */ + INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096, +}; + +#endif /* __INTEL_GT_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c new file mode 100644 index 000000000000..ceb785b75c25 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_llc.c @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include <linux/cpufreq.h> + +#include "i915_drv.h" +#include "intel_gt.h" +#include "intel_llc.h" +#include "intel_sideband.h" + +struct ia_constants { + unsigned int min_gpu_freq; + unsigned int max_gpu_freq; + + unsigned int min_ring_freq; + unsigned int max_ia_freq; +}; + +static struct intel_gt *llc_to_gt(struct intel_llc *llc) +{ + return container_of(llc, struct intel_gt, llc); +} + +static unsigned int cpu_max_MHz(void) +{ + struct cpufreq_policy *policy; + unsigned int max_khz; + + policy = cpufreq_cpu_get(0); + if (policy) { + max_khz = policy->cpuinfo.max_freq; + cpufreq_cpu_put(policy); + } else { + /* + * Default to measured freq if none found, PCU will ensure we + * don't go over + */ + max_khz = tsc_khz; + } + + return max_khz / 1000; +} + +static bool get_ia_constants(struct intel_llc *llc, + struct ia_constants *consts) +{ + struct drm_i915_private *i915 = llc_to_gt(llc)->i915; + struct intel_rps *rps = &llc_to_gt(llc)->rps; + + if (rps->max_freq <= rps->min_freq) + return false; + + consts->max_ia_freq = cpu_max_MHz(); + + consts->min_ring_freq = + intel_uncore_read(llc_to_gt(llc)->uncore, DCLK) & 0xf; + /* convert DDR frequency from units of 266.6MHz to bandwidth */ + consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3); + + consts->min_gpu_freq = rps->min_freq; + consts->max_gpu_freq = rps->max_freq; + if (INTEL_GEN(i915) >= 9) { + /* Convert GT frequency to 50 HZ units */ + consts->min_gpu_freq /= GEN9_FREQ_SCALER; + consts->max_gpu_freq /= GEN9_FREQ_SCALER; + } + + return true; +} + +static void calc_ia_freq(struct intel_llc *llc, + unsigned int gpu_freq, + const struct ia_constants *consts, + unsigned int *out_ia_freq, + unsigned int *out_ring_freq) +{ + struct drm_i915_private *i915 = llc_to_gt(llc)->i915; + const int diff = consts->max_gpu_freq - gpu_freq; + unsigned int ia_freq = 0, ring_freq = 0; + + if (INTEL_GEN(i915) >= 9) { + /* + * ring_freq = 2 * GT. 
ring_freq is in 100MHz units + * No floor required for ring frequency on SKL. + */ + ring_freq = gpu_freq; + } else if (INTEL_GEN(i915) >= 8) { + /* max(2 * GT, DDR). NB: GT is 50MHz units */ + ring_freq = max(consts->min_ring_freq, gpu_freq); + } else if (IS_HASWELL(i915)) { + ring_freq = mult_frac(gpu_freq, 5, 4); + ring_freq = max(consts->min_ring_freq, ring_freq); + /* leave ia_freq as the default, chosen by cpufreq */ + } else { + const int min_freq = 15; + const int scale = 180; + + /* + * On older processors, there is no separate ring + * clock domain, so in order to boost the bandwidth + * of the ring, we need to upclock the CPU (ia_freq). + * + * For GPU frequencies less than 750MHz, + * just use the lowest ring freq. + */ + if (gpu_freq < min_freq) + ia_freq = 800; + else + ia_freq = consts->max_ia_freq - diff * scale / 2; + ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); + } + + *out_ia_freq = ia_freq; + *out_ring_freq = ring_freq; +} + +static void gen6_update_ring_freq(struct intel_llc *llc) +{ + struct drm_i915_private *i915 = llc_to_gt(llc)->i915; + struct ia_constants consts; + unsigned int gpu_freq; + + if (!get_ia_constants(llc, &consts)) + return; + + /* + * For each potential GPU frequency, load a ring frequency we'd like + * to use for memory access. We do this by specifying the IA frequency + * the PCU should use as a reference to determine the ring frequency. + */ + for (gpu_freq = consts.max_gpu_freq; + gpu_freq >= consts.min_gpu_freq; + gpu_freq--) { + unsigned int ia_freq, ring_freq; + + calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq); + sandybridge_pcode_write(i915, + GEN6_PCODE_WRITE_MIN_FREQ_TABLE, + ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | + ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | + gpu_freq); + } +} + +void intel_llc_enable(struct intel_llc *llc) +{ + if (HAS_LLC(llc_to_gt(llc)->i915)) + gen6_update_ring_freq(llc); +} + +void intel_llc_disable(struct intel_llc *llc) +{ + /* Currently there is no HW configuration to be done to disable. 
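To make the table construction above concrete, here is a small standalone model of the oldest branch (no separate ring clock, so only the IA frequency is boosted); the constants are illustrative stand-ins, not values read from real hardware:

#include <stdio.h>

int main(void)
{
	const unsigned int max_ia_freq = 3400;		/* assumed CPU max, in MHz */
	const unsigned int min_gpu_freq = 10;		/* assumed, in 50MHz units */
	const unsigned int max_gpu_freq = 22;		/* assumed, in 50MHz units */
	unsigned int gpu_freq;

	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		const int diff = max_gpu_freq - gpu_freq;
		unsigned int ia_freq;

		if (gpu_freq < 15)			/* below ~750MHz: lowest setting */
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - diff * 180 / 2;
		ia_freq = (ia_freq + 50) / 100;		/* DIV_ROUND_CLOSEST(x, 100) */

		printf("gpu_freq=%u -> ia_freq=%u\n", gpu_freq, ia_freq);
	}

	return 0;
}

Each pair printed corresponds to one GEN6_PCODE_WRITE_MIN_FREQ_TABLE entry written by the loop above; on this oldest path the ring frequency field is left at zero.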
*/ +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_llc.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_llc.h b/drivers/gpu/drm/i915/gt/intel_llc.h new file mode 100644 index 000000000000..ef09a890d2b7 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_llc.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_LLC_H +#define INTEL_LLC_H + +struct intel_llc; + +void intel_llc_enable(struct intel_llc *llc); +void intel_llc_disable(struct intel_llc *llc); + +#endif /* INTEL_LLC_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_llc_types.h b/drivers/gpu/drm/i915/gt/intel_llc_types.h new file mode 100644 index 000000000000..ecad4687b930 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_llc_types.h @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_LLC_TYPES_H +#define INTEL_LLC_TYPES_H + +struct intel_llc { +}; + +#endif /* INTEL_LLC_TYPES_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c new file mode 100644 index 000000000000..0ac3b26674ad --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -0,0 +1,4722 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Ben Widawsky <[email protected]> + * Michel Thierry <[email protected]> + * Thomas Daniel <[email protected]> + * Oscar Mateo <[email protected]> + * + */ + +/** + * DOC: Logical Rings, Logical Ring Contexts and Execlists + * + * Motivation: + * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts". + * These expanded contexts enable a number of new abilities, especially + * "Execlists" (also implemented in this file). + * + * One of the main differences with the legacy HW contexts is that logical + * ring contexts incorporate many more things to the context's state, like + * PDPs or ringbuffer control registers: + * + * The reason why PDPs are included in the context is straightforward: as + * PPGTTs (per-process GTTs) are actually per-context, having the PDPs + * contained there mean you don't need to do a ppgtt->switch_mm yourself, + * instead, the GPU will do it for you on the context switch. + * + * But, what about the ringbuffer control registers (head, tail, etc..)? + * shouldn't we just need a set of those per engine command streamer? 
This is
+ * where the name "Logical Rings" starts to make sense: by virtualizing the
+ * rings, the engine cs shifts to a new "ring buffer" with every context
+ * switch. When you want to submit a workload to the GPU you: A) choose your
+ * context, B) find its appropriate virtualized ring, C) write commands to it
+ * and then, finally, D) tell the GPU to switch to that context.
+ *
+ * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
+ * to a context is via a context execution list, ergo "Execlists".
+ *
+ * LRC implementation:
+ * Regarding the creation of contexts, we have:
+ *
+ * - One global default context.
+ * - One local default context for each opened fd.
+ * - One local extra context for each context create ioctl call.
+ *
+ * Now that ringbuffers belong per-context (and not per-engine, like before)
+ * and that contexts are uniquely tied to a given engine (and not reusable,
+ * like before) we need:
+ *
+ * - One ringbuffer per-engine inside each context.
+ * - One backing object per-engine inside each context.
+ *
+ * The global default context starts its life with these new objects fully
+ * allocated and populated. The local default context for each opened fd is
+ * more complex, because we don't know at creation time which engine is going
+ * to use them. To handle this, we have implemented a deferred creation of LR
+ * contexts:
+ *
+ * The local context starts its life as a hollow or blank holder, that only
+ * gets populated for a given engine once we receive an execbuffer. If later
+ * on we receive another execbuffer ioctl for the same context but a different
+ * engine, we allocate/populate a new ringbuffer and context backing object and
+ * so on.
+ *
+ * Finally, regarding local contexts created using the ioctl call: as they are
+ * only allowed with the render ring, we can allocate & populate them right
+ * away (no need to defer anything, at least for now).
+ *
+ * Execlists implementation:
+ * Execlists are the new method by which, on gen8+ hardware, workloads are
+ * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
+ * This method works as follows:
+ *
+ * When a request is committed, its commands (the BB start and any leading or
+ * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
+ * for the appropriate context. The tail pointer in the hardware context is not
+ * updated at this time, but instead, kept by the driver in the ringbuffer
+ * structure. A structure representing this request is added to a request queue
+ * for the appropriate engine: this structure contains a copy of the context's
+ * tail after the request was written to the ring buffer and a pointer to the
+ * context itself.
+ *
+ * If the engine's request queue was empty before the request was added, the
+ * queue is processed immediately. Otherwise the queue will be processed during
+ * a context switch interrupt. In any case, elements on the queue will get sent
+ * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
+ * globally unique 20-bit submission ID.
+ *
+ * When execution of a request completes, the GPU updates the context status
+ * buffer with a context complete event and generates a context switch interrupt.
+ * During the interrupt handling, the driver examines the events in the buffer:
+ * for each context complete event, if the announced ID matches that on the head
+ * of the request queue, then that request is retired and removed from the queue.
+ * + * After processing, if any requests were retired and the queue is not empty + * then a new execution list can be submitted. The two requests at the front of + * the queue are next to be submitted but since a context may not occur twice in + * an execution list, if subsequent requests have the same ID as the first then + * the two requests must be combined. This is done simply by discarding requests + * at the head of the queue until either only one requests is left (in which case + * we use a NULL second context) or the first two requests have unique IDs. + * + * By always executing the first two requests in the queue the driver ensures + * that the GPU is kept as busy as possible. In the case where a single context + * completes but a second context is still executing, the request for this second + * context will be at the head of the queue when we remove the first one. This + * request will then be resubmitted along with a new request for a different context, + * which will cause the hardware to continue executing the second request and queue + * the new request (the GPU detects the condition of a context getting preempted + * with the same context and optimizes the context switch flow by not doing + * preemption, but just sampling the new tail pointer). + * + */ +#include <linux/interrupt.h> + +#include "gem/i915_gem_context.h" + +#include "i915_drv.h" +#include "i915_perf.h" +#include "i915_trace.h" +#include "i915_vgpu.h" +#include "intel_engine_pm.h" +#include "intel_gt.h" +#include "intel_gt_pm.h" +#include "intel_lrc_reg.h" +#include "intel_mocs.h" +#include "intel_reset.h" +#include "intel_ring.h" +#include "intel_workarounds.h" + +#define RING_EXECLIST_QFULL (1 << 0x2) +#define RING_EXECLIST1_VALID (1 << 0x3) +#define RING_EXECLIST0_VALID (1 << 0x4) +#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE) +#define RING_EXECLIST1_ACTIVE (1 << 0x11) +#define RING_EXECLIST0_ACTIVE (1 << 0x12) + +#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0) +#define GEN8_CTX_STATUS_PREEMPTED (1 << 1) +#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2) +#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3) +#define GEN8_CTX_STATUS_COMPLETE (1 << 4) +#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15) + +#define GEN8_CTX_STATUS_COMPLETED_MASK \ + (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED) + +#define CTX_DESC_FORCE_RESTORE BIT_ULL(2) + +#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */ +#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */ +#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15) +#define GEN12_IDLE_CTX_ID 0x7FF +#define GEN12_CSB_CTX_VALID(csb_dw) \ + (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID) + +/* Typical size of the average request (2 pipecontrols and a MI_BB) */ +#define EXECLISTS_REQUEST_SIZE 64 /* bytes */ +#define WA_TAIL_DWORDS 2 +#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS) + +struct virtual_engine { + struct intel_engine_cs base; + struct intel_context context; + + /* + * We allow only a single request through the virtual engine at a time + * (each request in the timeline waits for the completion fence of + * the previous before being submitted). By restricting ourselves to + * only submitting a single request, each request is placed on to a + * physical to maximise load spreading (by virtue of the late greedy + * scheduling -- each real engine takes the next available request + * upon idling). 
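A toy model of the pairing rule described above may help: consecutive ready requests that share a context collapse into a single port (only the final tail matters), and the two ports must name different contexts. The types below are stand-ins for illustration, not the driver's structures:

struct toy_request {
	int ctx_id;	/* stand-in for the context's submission ID */
	int tail;	/* stand-in for the ring tail after this request */
};

/* Fill at most two ports from an in-order queue; returns how many were filled. */
static int build_elsp_pair(const struct toy_request *queue, int count,
			   const struct toy_request *ports[2])
{
	int n = 0, i;

	for (i = 0; i < count; i++) {
		if (n && ports[n - 1]->ctx_id == queue[i].ctx_id) {
			/* Same context as the previous port: just advance its tail. */
			ports[n - 1] = &queue[i];
		} else if (n < 2) {
			/* A different context may occupy the next port. */
			ports[n++] = &queue[i];
		} else {
			/* Both ports are taken by other contexts: stop here. */
			break;
		}
	}

	return n;
}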
+ */ + struct i915_request *request; + + /* + * We keep a rbtree of available virtual engines inside each physical + * engine, sorted by priority. Here we preallocate the nodes we need + * for the virtual engine, indexed by physical_engine->id. + */ + struct ve_node { + struct rb_node rb; + int prio; + } nodes[I915_NUM_ENGINES]; + + /* + * Keep track of bonded pairs -- restrictions upon on our selection + * of physical engines any particular request may be submitted to. + * If we receive a submit-fence from a master engine, we will only + * use one of sibling_mask physical engines. + */ + struct ve_bond { + const struct intel_engine_cs *master; + intel_engine_mask_t sibling_mask; + } *bonds; + unsigned int num_bonds; + + /* And finally, which physical engines this virtual engine maps onto. */ + unsigned int num_siblings; + struct intel_engine_cs *siblings[0]; +}; + +static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine) +{ + GEM_BUG_ON(!intel_engine_is_virtual(engine)); + return container_of(engine, struct virtual_engine, base); +} + +static int __execlists_context_alloc(struct intel_context *ce, + struct intel_engine_cs *engine); + +static void execlists_init_reg_state(u32 *reg_state, + const struct intel_context *ce, + const struct intel_engine_cs *engine, + const struct intel_ring *ring, + bool close); +static void +__execlists_update_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine); + +static void mark_eio(struct i915_request *rq) +{ + if (i915_request_completed(rq)) + return; + + GEM_BUG_ON(i915_request_signaled(rq)); + + dma_fence_set_error(&rq->fence, -EIO); + i915_request_mark_complete(rq); +} + +static struct i915_request * +active_request(const struct intel_timeline * const tl, struct i915_request *rq) +{ + struct i915_request *active = rq; + + rcu_read_lock(); + list_for_each_entry_continue_reverse(rq, &tl->requests, link) { + if (i915_request_completed(rq)) + break; + + active = rq; + } + rcu_read_unlock(); + + return active; +} + +static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine) +{ + return (i915_ggtt_offset(engine->status_page.vma) + + I915_GEM_HWS_PREEMPT_ADDR); +} + +static inline void +ring_set_paused(const struct intel_engine_cs *engine, int state) +{ + /* + * We inspect HWS_PREEMPT with a semaphore inside + * engine->emit_fini_breadcrumb. If the dword is true, + * the ring is paused as the semaphore will busywait + * until the dword is false. + */ + engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state; + if (state) + wmb(); +} + +static inline struct i915_priolist *to_priolist(struct rb_node *rb) +{ + return rb_entry(rb, struct i915_priolist, node); +} + +static inline int rq_prio(const struct i915_request *rq) +{ + return rq->sched.attr.priority; +} + +static int effective_prio(const struct i915_request *rq) +{ + int prio = rq_prio(rq); + + /* + * If this request is special and must not be interrupted at any + * cost, so be it. Note we are only checking the most recent request + * in the context and so may be masking an earlier vip request. It + * is hoped that under the conditions where nopreempt is used, this + * will not matter (i.e. all requests to that context will be + * nopreempt for as long as desired). + */ + if (i915_request_has_nopreempt(rq)) + prio = I915_PRIORITY_UNPREEMPTABLE; + + /* + * On unwinding the active request, we give it a priority bump + * if it has completed waiting on any semaphore. 
If we know that + * the request has already started, we can prevent an unwanted + * preempt-to-idle cycle by taking that into account now. + */ + if (__i915_request_has_started(rq)) + prio |= I915_PRIORITY_NOSEMAPHORE; + + /* Restrict mere WAIT boosts from triggering preemption */ + BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */ + return prio | __NO_PREEMPTION; +} + +static int queue_prio(const struct intel_engine_execlists *execlists) +{ + struct i915_priolist *p; + struct rb_node *rb; + + rb = rb_first_cached(&execlists->queue); + if (!rb) + return INT_MIN; + + /* + * As the priolist[] are inverted, with the highest priority in [0], + * we have to flip the index value to become priority. + */ + p = to_priolist(rb); + return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used); +} + +static inline bool need_preempt(const struct intel_engine_cs *engine, + const struct i915_request *rq, + struct rb_node *rb) +{ + int last_prio; + + if (!intel_engine_has_semaphores(engine)) + return false; + + /* + * Check if the current priority hint merits a preemption attempt. + * + * We record the highest value priority we saw during rescheduling + * prior to this dequeue, therefore we know that if it is strictly + * less than the current tail of ESLP[0], we do not need to force + * a preempt-to-idle cycle. + * + * However, the priority hint is a mere hint that we may need to + * preempt. If that hint is stale or we may be trying to preempt + * ourselves, ignore the request. + * + * More naturally we would write + * prio >= max(0, last); + * except that we wish to prevent triggering preemption at the same + * priority level: the task that is running should remain running + * to preserve FIFO ordering of dependencies. + */ + last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1); + if (engine->execlists.queue_priority_hint <= last_prio) + return false; + + /* + * Check against the first request in ELSP[1], it will, thanks to the + * power of PI, be the highest priority of that context. + */ + if (!list_is_last(&rq->sched.link, &engine->active.requests) && + rq_prio(list_next_entry(rq, sched.link)) > last_prio) + return true; + + if (rb) { + struct virtual_engine *ve = + rb_entry(rb, typeof(*ve), nodes[engine->id].rb); + bool preempt = false; + + if (engine == ve->siblings[0]) { /* only preempt one sibling */ + struct i915_request *next; + + rcu_read_lock(); + next = READ_ONCE(ve->request); + if (next) + preempt = rq_prio(next) > last_prio; + rcu_read_unlock(); + } + + if (preempt) + return preempt; + } + + /* + * If the inflight context did not trigger the preemption, then maybe + * it was the set of queued requests? Pick the highest priority in + * the queue (the first active priolist) and see if it deserves to be + * running instead of ELSP[0]. + * + * The highest priority request in the queue can not be either + * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same + * context, it's priority would not exceed ELSP[0] aka last_prio. + */ + return queue_prio(&engine->execlists) > last_prio; +} + +__maybe_unused static inline bool +assert_priority_queue(const struct i915_request *prev, + const struct i915_request *next) +{ + /* + * Without preemption, the prev may refer to the still active element + * which we refuse to let go. + * + * Even with preemption, there are times when we think it is better not + * to preempt and leave an ostensibly lower priority request in flight. 
+ */ + if (i915_request_is_active(prev)) + return true; + + return rq_prio(prev) >= rq_prio(next); +} + +/* + * The context descriptor encodes various attributes of a context, + * including its GTT address and some flags. Because it's fairly + * expensive to calculate, we'll just do it once and cache the result, + * which remains valid until the context is unpinned. + * + * This is what a descriptor looks like, from LSB to MSB:: + * + * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template) + * bits 12-31: LRCA, GTT address of (the HWSP of) this context + * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC) + * bits 53-54: mbz, reserved for use by hardware + * bits 55-63: group ID, currently unused and set to 0 + * + * Starting from Gen11, the upper dword of the descriptor has a new format: + * + * bits 32-36: reserved + * bits 37-47: SW context ID + * bits 48:53: engine instance + * bit 54: mbz, reserved for use by hardware + * bits 55-60: SW counter + * bits 61-63: engine class + * + * engine info, SW context ID and SW counter need to form a unique number + * (Context ID) per lrc. + */ +static u64 +lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) +{ + u64 desc; + + desc = INTEL_LEGACY_32B_CONTEXT; + if (i915_vm_is_4lvl(ce->vm)) + desc = INTEL_LEGACY_64B_CONTEXT; + desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT; + + desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; + if (IS_GEN(engine->i915, 8)) + desc |= GEN8_CTX_L3LLC_COHERENT; + + desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */ + /* + * The following 32bits are copied into the OA reports (dword 2). + * Consider updating oa_get_render_ctx_id in i915_perf.c when changing + * anything below. + */ + if (INTEL_GEN(engine->i915) >= 11) { + desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT; + /* bits 48-53 */ + + desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT; + /* bits 61-63 */ + } + + return desc; +} + +static u32 *set_offsets(u32 *regs, + const u8 *data, + const struct intel_engine_cs *engine) +#define NOP(x) (BIT(7) | (x)) +#define LRI(count, flags) ((flags) << 6 | (count)) +#define POSTED BIT(0) +#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200)) +#define REG16(x) \ + (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \ + (((x) >> 2) & 0x7f) +#define END() 0 +{ + const u32 base = engine->mmio_base; + + while (*data) { + u8 count, flags; + + if (*data & BIT(7)) { /* skip */ + regs += *data++ & ~BIT(7); + continue; + } + + count = *data & 0x3f; + flags = *data >> 6; + data++; + + *regs = MI_LOAD_REGISTER_IMM(count); + if (flags & POSTED) + *regs |= MI_LRI_FORCE_POSTED; + if (INTEL_GEN(engine->i915) >= 11) + *regs |= MI_LRI_CS_MMIO; + regs++; + + GEM_BUG_ON(!count); + do { + u32 offset = 0; + u8 v; + + do { + v = *data++; + offset <<= 7; + offset |= v & ~BIT(7); + } while (v & BIT(7)); + + *regs = base + (offset << 2); + regs += 2; + } while (--count); + } + + return regs; +} + +static const u8 gen8_xcs_offsets[] = { + NOP(1), + LRI(11, 0), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + + NOP(9), + LRI(9, 0), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + NOP(13), + LRI(2, 0), + REG16(0x200), + REG(0x028), + + END(), +}; + +static const u8 gen9_xcs_offsets[] = { + NOP(1), + LRI(14, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + 
REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + + NOP(3), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + NOP(13), + LRI(1, POSTED), + REG16(0x200), + + NOP(13), + LRI(44, POSTED), + REG(0x028), + REG(0x09c), + REG(0x0c0), + REG(0x178), + REG(0x17c), + REG16(0x358), + REG(0x170), + REG(0x150), + REG(0x154), + REG(0x158), + REG16(0x41c), + REG16(0x600), + REG16(0x604), + REG16(0x608), + REG16(0x60c), + REG16(0x610), + REG16(0x614), + REG16(0x618), + REG16(0x61c), + REG16(0x620), + REG16(0x624), + REG16(0x628), + REG16(0x62c), + REG16(0x630), + REG16(0x634), + REG16(0x638), + REG16(0x63c), + REG16(0x640), + REG16(0x644), + REG16(0x648), + REG16(0x64c), + REG16(0x650), + REG16(0x654), + REG16(0x658), + REG16(0x65c), + REG16(0x660), + REG16(0x664), + REG16(0x668), + REG16(0x66c), + REG16(0x670), + REG16(0x674), + REG16(0x678), + REG16(0x67c), + REG(0x068), + + END(), +}; + +static const u8 gen12_xcs_offsets[] = { + NOP(1), + LRI(13, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + REG(0x180), + REG16(0x2b4), + + NOP(5), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + END(), +}; + +static const u8 gen8_rcs_offsets[] = { + NOP(1), + LRI(14, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + + NOP(3), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + NOP(13), + LRI(1, 0), + REG(0x0c8), + + END(), +}; + +static const u8 gen11_rcs_offsets[] = { + NOP(1), + LRI(15, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + REG(0x180), + + NOP(1), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + LRI(1, POSTED), + REG(0x1b0), + + NOP(10), + LRI(1, 0), + REG(0x0c8), + + END(), +}; + +static const u8 gen12_rcs_offsets[] = { + NOP(1), + LRI(13, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + REG(0x180), + REG16(0x2b4), + + NOP(5), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + LRI(3, POSTED), + REG(0x1b0), + REG16(0x5a8), + REG16(0x5ac), + + NOP(6), + LRI(1, 0), + REG(0x0c8), + + END(), +}; + +#undef END +#undef REG16 +#undef REG +#undef LRI +#undef NOP + +static const u8 *reg_offsets(const struct intel_engine_cs *engine) +{ + /* + * The gen12+ lists only have the registers we program in the basic + * default state. We rely on the context image using relative + * addressing to automatic fixup the register state between the + * physical engines for virtual engine. 
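The byte encoding used by these tables can be unpacked with a few lines of ordinary C. The sketch below mirrors the walk done by set_offsets() but only prints register offsets; the demo bytes are re-derived by hand from the NOP/LRI/REG/REG16 macros above and truncated to three registers for brevity:

#include <stdio.h>

static void decode(const unsigned char *data)
{
	while (*data) {
		unsigned int count;

		if (*data & 0x80) {			/* NOP(x): skip x dwords */
			printf("skip %u dword(s)\n", *data++ & 0x7f);
			continue;
		}

		count = *data & 0x3f;			/* LRI(count, flags) */
		printf("LRI of %u register(s)%s\n", count,
		       *data >> 6 ? " (posted)" : "");
		data++;

		do {
			unsigned int offset = 0, v;

			do {				/* 7 bits per byte, bit 7 = continue */
				v = *data++;
				offset = (offset << 7) | (v & 0x7f);
			} while (v & 0x80);

			printf("  reg 0x%04x\n", offset << 2);
		} while (--count);
	}
}

int main(void)
{
	static const unsigned char demo[] = {
		0x81,		/* NOP(1) */
		0x03,		/* LRI(3, 0) */
		0x81, 0x11,	/* REG16(0x244) */
		0x0d,		/* REG(0x034) */
		0x0c,		/* REG(0x030) */
		0x00,		/* END() */
	};

	decode(demo);
	return 0;
}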
+ */ + GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 && + !intel_engine_has_relative_mmio(engine)); + + if (engine->class == RENDER_CLASS) { + if (INTEL_GEN(engine->i915) >= 12) + return gen12_rcs_offsets; + else if (INTEL_GEN(engine->i915) >= 11) + return gen11_rcs_offsets; + else + return gen8_rcs_offsets; + } else { + if (INTEL_GEN(engine->i915) >= 12) + return gen12_xcs_offsets; + else if (INTEL_GEN(engine->i915) >= 9) + return gen9_xcs_offsets; + else + return gen8_xcs_offsets; + } +} + +static void unwind_wa_tail(struct i915_request *rq) +{ + rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); + assert_ring_tail_valid(rq->ring, rq->tail); +} + +static struct i915_request * +__unwind_incomplete_requests(struct intel_engine_cs *engine) +{ + struct i915_request *rq, *rn, *active = NULL; + struct list_head *uninitialized_var(pl); + int prio = I915_PRIORITY_INVALID; + + lockdep_assert_held(&engine->active.lock); + + list_for_each_entry_safe_reverse(rq, rn, + &engine->active.requests, + sched.link) { + + if (i915_request_completed(rq)) + continue; /* XXX */ + + __i915_request_unsubmit(rq); + unwind_wa_tail(rq); + + /* + * Push the request back into the queue for later resubmission. + * If this request is not native to this physical engine (i.e. + * it came from a virtual source), push it back onto the virtual + * engine so that it can be moved across onto another physical + * engine as load dictates. + */ + if (likely(rq->execution_mask == engine->mask)) { + GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); + if (rq_prio(rq) != prio) { + prio = rq_prio(rq); + pl = i915_sched_lookup_priolist(engine, prio); + } + GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); + + list_move(&rq->sched.link, pl); + active = rq; + } else { + struct intel_engine_cs *owner = rq->hw_context->engine; + + /* + * Decouple the virtual breadcrumb before moving it + * back to the virtual engine -- we don't want the + * request to complete in the background and try + * and cancel the breadcrumb on the virtual engine + * (instead of the old engine where it is linked)! + */ + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &rq->fence.flags)) { + spin_lock_nested(&rq->lock, + SINGLE_DEPTH_NESTING); + i915_request_cancel_breadcrumb(rq); + spin_unlock(&rq->lock); + } + rq->engine = owner; + owner->submit_request(rq); + active = NULL; + } + } + + return active; +} + +struct i915_request * +execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists) +{ + struct intel_engine_cs *engine = + container_of(execlists, typeof(*engine), execlists); + + return __unwind_incomplete_requests(engine); +} + +static inline void +execlists_context_status_change(struct i915_request *rq, unsigned long status) +{ + /* + * Only used when GVT-g is enabled now. When GVT-g is disabled, + * The compiler should eliminate this function as dead-code. 
+ */ + if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) + return; + + atomic_notifier_call_chain(&rq->engine->context_status_notifier, + status, rq); +} + +static void intel_engine_context_in(struct intel_engine_cs *engine) +{ + unsigned long flags; + + if (READ_ONCE(engine->stats.enabled) == 0) + return; + + write_seqlock_irqsave(&engine->stats.lock, flags); + + if (engine->stats.enabled > 0) { + if (engine->stats.active++ == 0) + engine->stats.start = ktime_get(); + GEM_BUG_ON(engine->stats.active == 0); + } + + write_sequnlock_irqrestore(&engine->stats.lock, flags); +} + +static void intel_engine_context_out(struct intel_engine_cs *engine) +{ + unsigned long flags; + + if (READ_ONCE(engine->stats.enabled) == 0) + return; + + write_seqlock_irqsave(&engine->stats.lock, flags); + + if (engine->stats.enabled > 0) { + ktime_t last; + + if (engine->stats.active && --engine->stats.active == 0) { + /* + * Decrement the active context count and in case GPU + * is now idle add up to the running total. + */ + last = ktime_sub(ktime_get(), engine->stats.start); + + engine->stats.total = ktime_add(engine->stats.total, + last); + } else if (engine->stats.active == 0) { + /* + * After turning on engine stats, context out might be + * the first event in which case we account from the + * time stats gathering was turned on. + */ + last = ktime_sub(ktime_get(), engine->stats.enabled_at); + + engine->stats.total = ktime_add(engine->stats.total, + last); + } + } + + write_sequnlock_irqrestore(&engine->stats.lock, flags); +} + +static void restore_default_state(struct intel_context *ce, + struct intel_engine_cs *engine) +{ + u32 *regs = ce->lrc_reg_state; + + if (engine->pinned_default_state) + memcpy(regs, /* skip restoring the vanilla PPHWSP */ + engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, + engine->context_size - PAGE_SIZE); + + execlists_init_reg_state(regs, ce, engine, ce->ring, false); +} + +static void reset_active(struct i915_request *rq, + struct intel_engine_cs *engine) +{ + struct intel_context * const ce = rq->hw_context; + u32 head; + + /* + * The executing context has been cancelled. We want to prevent + * further execution along this context and propagate the error on + * to anything depending on its results. + * + * In __i915_request_submit(), we apply the -EIO and remove the + * requests' payloads for any banned requests. But first, we must + * rewind the context back to the start of the incomplete request so + * that we do not jump back into the middle of the batch. + * + * We preserve the breadcrumbs and semaphores of the incomplete + * requests so that inter-timeline dependencies (i.e other timelines) + * remain correctly ordered. And we defer to __i915_request_submit() + * so that all asynchronous waits are correctly handled. 
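The busy-time accounting in intel_engine_context_in/out() above reduces to a nesting counter plus a running total; a stripped-down userspace model of the same idea (ignoring the seqlock and the enabled/enabled_at bookkeeping):

#include <time.h>

struct toy_engine_stats {
	unsigned int active;	/* contexts currently executing on the engine */
	struct timespec start;	/* timestamp of the 0 -> 1 transition */
	double total;		/* accumulated busy time, in seconds */
};

static double since(const struct timespec *then)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - then->tv_sec) + (now.tv_nsec - then->tv_nsec) / 1e9;
}

static void toy_context_in(struct toy_engine_stats *s)
{
	if (s->active++ == 0)
		clock_gettime(CLOCK_MONOTONIC, &s->start);
}

static void toy_context_out(struct toy_engine_stats *s)
{
	if (s->active && --s->active == 0)
		s->total += since(&s->start);
}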
+ */ + GEM_TRACE("%s(%s): { rq=%llx:%lld }\n", + __func__, engine->name, rq->fence.context, rq->fence.seqno); + + /* On resubmission of the active request, payload will be scrubbed */ + if (i915_request_completed(rq)) + head = rq->tail; + else + head = active_request(ce->timeline, rq)->head; + ce->ring->head = intel_ring_wrap(ce->ring, head); + intel_ring_update_space(ce->ring); + + /* Scrub the context image to prevent replaying the previous batch */ + restore_default_state(ce, engine); + __execlists_update_reg_state(ce, engine); + + /* We've switched away, so this should be a no-op, but intent matters */ + ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; +} + +static inline struct intel_engine_cs * +__execlists_schedule_in(struct i915_request *rq) +{ + struct intel_engine_cs * const engine = rq->engine; + struct intel_context * const ce = rq->hw_context; + + intel_context_get(ce); + + if (unlikely(i915_gem_context_is_banned(ce->gem_context))) + reset_active(rq, engine); + + if (ce->tag) { + /* Use a fixed tag for OA and friends */ + ce->lrc_desc |= (u64)ce->tag << 32; + } else { + /* We don't need a strict matching tag, just different values */ + ce->lrc_desc &= ~GENMASK_ULL(47, 37); + ce->lrc_desc |= + (u64)(engine->context_tag++ % NUM_CONTEXT_TAG) << + GEN11_SW_CTX_ID_SHIFT; + BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID); + } + + intel_gt_pm_get(engine->gt); + execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); + intel_engine_context_in(engine); + + return engine; +} + +static inline struct i915_request * +execlists_schedule_in(struct i915_request *rq, int idx) +{ + struct intel_context * const ce = rq->hw_context; + struct intel_engine_cs *old; + + GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine)); + trace_i915_request_in(rq, idx); + + old = READ_ONCE(ce->inflight); + do { + if (!old) { + WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq)); + break; + } + } while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old))); + + GEM_BUG_ON(intel_context_inflight(ce) != rq->engine); + return i915_request_get(rq); +} + +static void kick_siblings(struct i915_request *rq, struct intel_context *ce) +{ + struct virtual_engine *ve = container_of(ce, typeof(*ve), context); + struct i915_request *next = READ_ONCE(ve->request); + + if (next && next->execution_mask & ~rq->execution_mask) + tasklet_schedule(&ve->base.execlists.tasklet); +} + +static inline void +__execlists_schedule_out(struct i915_request *rq, + struct intel_engine_cs * const engine) +{ + struct intel_context * const ce = rq->hw_context; + + /* + * NB process_csb() is not under the engine->active.lock and hence + * schedule_out can race with schedule_in meaning that we should + * refrain from doing non-trivial work here. + */ + + intel_engine_context_out(engine); + execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); + intel_gt_pm_put(engine->gt); + + /* + * If this is part of a virtual engine, its next request may + * have been blocked waiting for access to the active context. + * We have to kick all the siblings again in case we need to + * switch (e.g. the next request is not runnable on this + * engine). Hopefully, we will already have submitted the next + * request before the tasklet runs and do not need to rebuild + * each virtual tree and kick everyone again. 
+ */ + if (ce->engine != engine) + kick_siblings(rq, ce); + + intel_context_put(ce); +} + +static inline void +execlists_schedule_out(struct i915_request *rq) +{ + struct intel_context * const ce = rq->hw_context; + struct intel_engine_cs *cur, *old; + + trace_i915_request_out(rq); + + old = READ_ONCE(ce->inflight); + do + cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL; + while (!try_cmpxchg(&ce->inflight, &old, cur)); + if (!cur) + __execlists_schedule_out(rq, old); + + i915_request_put(rq); +} + +static u64 execlists_update_context(const struct i915_request *rq) +{ + struct intel_context *ce = rq->hw_context; + u64 desc; + + ce->lrc_reg_state[CTX_RING_TAIL] = + intel_ring_set_tail(rq->ring, rq->tail); + + /* + * Make sure the context image is complete before we submit it to HW. + * + * Ostensibly, writes (including the WCB) should be flushed prior to + * an uncached write such as our mmio register access, the empirical + * evidence (esp. on Braswell) suggests that the WC write into memory + * may not be visible to the HW prior to the completion of the UC + * register write and that we may begin execution from the context + * before its image is complete leading to invalid PD chasing. + * + * Furthermore, Braswell, at least, wants a full mb to be sure that + * the writes are coherent in memory (visible to the GPU) prior to + * execution, and not just visible to other CPUs (as is the result of + * wmb). + */ + mb(); + + desc = ce->lrc_desc; + ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; + + /* Wa_1607138340:tgl */ + if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0)) + desc |= CTX_DESC_FORCE_RESTORE; + + return desc; +} + +static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port) +{ + if (execlists->ctrl_reg) { + writel(lower_32_bits(desc), execlists->submit_reg + port * 2); + writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1); + } else { + writel(upper_32_bits(desc), execlists->submit_reg); + writel(lower_32_bits(desc), execlists->submit_reg); + } +} + +static __maybe_unused void +trace_ports(const struct intel_engine_execlists *execlists, + const char *msg, + struct i915_request * const *ports) +{ + const struct intel_engine_cs *engine = + container_of(execlists, typeof(*engine), execlists); + + if (!ports[0]) + return; + + GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n", + engine->name, msg, + ports[0]->fence.context, + ports[0]->fence.seqno, + i915_request_completed(ports[0]) ? "!" : + i915_request_started(ports[0]) ? "*" : + "", + ports[1] ? ports[1]->fence.context : 0, + ports[1] ? 
ports[1]->fence.seqno : 0); +} + +static __maybe_unused bool +assert_pending_valid(const struct intel_engine_execlists *execlists, + const char *msg) +{ + struct i915_request * const *port, *rq; + struct intel_context *ce = NULL; + + trace_ports(execlists, msg, execlists->pending); + + if (!execlists->pending[0]) { + GEM_TRACE_ERR("Nothing pending for promotion!\n"); + return false; + } + + if (execlists->pending[execlists_num_ports(execlists)]) { + GEM_TRACE_ERR("Excess pending[%d] for promotion!\n", + execlists_num_ports(execlists)); + return false; + } + + for (port = execlists->pending; (rq = *port); port++) { + if (ce == rq->hw_context) { + GEM_TRACE_ERR("Duplicate context in pending[%zd]\n", + port - execlists->pending); + return false; + } + + ce = rq->hw_context; + if (i915_request_completed(rq)) + continue; + + if (i915_active_is_idle(&ce->active)) { + GEM_TRACE_ERR("Inactive context in pending[%zd]\n", + port - execlists->pending); + return false; + } + + if (!i915_vma_is_pinned(ce->state)) { + GEM_TRACE_ERR("Unpinned context in pending[%zd]\n", + port - execlists->pending); + return false; + } + + if (!i915_vma_is_pinned(ce->ring->vma)) { + GEM_TRACE_ERR("Unpinned ringbuffer in pending[%zd]\n", + port - execlists->pending); + return false; + } + } + + return ce; +} + +static void execlists_submit_ports(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists *execlists = &engine->execlists; + unsigned int n; + + GEM_BUG_ON(!assert_pending_valid(execlists, "submit")); + + /* + * We can skip acquiring intel_runtime_pm_get() here as it was taken + * on our behalf by the request (see i915_gem_mark_busy()) and it will + * not be relinquished until the device is idle (see + * i915_gem_idle_work_handler()). As a precaution, we make sure + * that all ELSP are drained i.e. we have processed the CSB, + * before allowing ourselves to idle and calling intel_runtime_pm_put(). + */ + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); + + /* + * ELSQ note: the submit queue is not cleared after being submitted + * to the HW so we need to make sure we always clean it up. This is + * currently ensured by the fact that we always write the same number + * of elsq entries, keep this in mind before changing the loop below. + */ + for (n = execlists_num_ports(execlists); n--; ) { + struct i915_request *rq = execlists->pending[n]; + + write_desc(execlists, + rq ? execlists_update_context(rq) : 0, + n); + } + + /* we need to manually load the submit queue */ + if (execlists->ctrl_reg) + writel(EL_CTRL_LOAD, execlists->ctrl_reg); +} + +static bool ctx_single_port_submission(const struct intel_context *ce) +{ + return (IS_ENABLED(CONFIG_DRM_I915_GVT) && + i915_gem_context_force_single_submission(ce->gem_context)); +} + +static bool can_merge_ctx(const struct intel_context *prev, + const struct intel_context *next) +{ + if (prev != next) + return false; + + if (ctx_single_port_submission(prev)) + return false; + + return true; +} + +static bool can_merge_rq(const struct i915_request *prev, + const struct i915_request *next) +{ + GEM_BUG_ON(prev == next); + GEM_BUG_ON(!assert_priority_queue(prev, next)); + + /* + * We do not submit known completed requests. Therefore if the next + * request is already completed, we can pretend to merge it in + * with the previous context (and we will skip updating the ELSP + * and tracking). Thus hopefully keeping the ELSP full with active + * contexts, despite the best efforts of preempt-to-busy to confuse + * us. 
+ */
+	if (i915_request_completed(next))
+		return true;
+
+	if (unlikely((prev->flags ^ next->flags) &
+		     (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL)))
+		return false;
+
+	if (!can_merge_ctx(prev->hw_context, next->hw_context))
+		return false;
+
+	return true;
+}
+
+static void virtual_update_register_offsets(u32 *regs,
+					    struct intel_engine_cs *engine)
+{
+	set_offsets(regs, reg_offsets(engine), engine);
+}
+
+static bool virtual_matches(const struct virtual_engine *ve,
+			    const struct i915_request *rq,
+			    const struct intel_engine_cs *engine)
+{
+	const struct intel_engine_cs *inflight;
+
+	if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
+		return false;
+
+	/*
+	 * We track when the HW has completed saving the context image
+	 * (i.e. when we have seen the final CS event switching out of
+	 * the context) and must not overwrite the context image before
+	 * then. This restricts us to only using the active engine
+	 * while the previous virtualized request is inflight (so
+	 * we reuse the register offsets). This is a very small
+	 * hysteresis on the greedy selection algorithm.
+	 */
+	inflight = intel_context_inflight(&ve->context);
+	if (inflight && inflight != engine)
+		return false;
+
+	return true;
+}
+
+static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
+				     struct intel_engine_cs *engine)
+{
+	struct intel_engine_cs *old = ve->siblings[0];
+
+	/* All unattached (rq->engine == old) must already be completed */
+
+	spin_lock(&old->breadcrumbs.irq_lock);
+	if (!list_empty(&ve->context.signal_link)) {
+		list_move_tail(&ve->context.signal_link,
+			       &engine->breadcrumbs.signalers);
+		intel_engine_queue_breadcrumbs(engine);
+	}
+	spin_unlock(&old->breadcrumbs.irq_lock);
+}
+
+static struct i915_request *
+last_active(const struct intel_engine_execlists *execlists)
+{
+	struct i915_request * const *last = READ_ONCE(execlists->active);
+
+	while (*last && i915_request_completed(*last))
+		last++;
+
+	return *last;
+}
+
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
+{
+	LIST_HEAD(list);
+
+	/*
+	 * We want to move the interrupted request to the back of
+	 * the round-robin list (i.e. its priority level), but
+	 * in doing so, we must then move all requests that were in
+	 * flight and were waiting for the interrupted request to
+	 * be run after it again.
+ */ + do { + struct i915_dependency *p; + + GEM_BUG_ON(i915_request_is_active(rq)); + list_move_tail(&rq->sched.link, pl); + + list_for_each_entry(p, &rq->sched.waiters_list, wait_link) { + struct i915_request *w = + container_of(p->waiter, typeof(*w), sched); + + /* Leave semaphores spinning on the other engines */ + if (w->engine != rq->engine) + continue; + + /* No waiter should start before its signaler */ + GEM_BUG_ON(i915_request_started(w) && + !i915_request_completed(rq)); + + GEM_BUG_ON(i915_request_is_active(w)); + if (list_empty(&w->sched.link)) + continue; /* Not yet submitted; unready */ + + if (rq_prio(w) < rq_prio(rq)) + continue; + + GEM_BUG_ON(rq_prio(w) > rq_prio(rq)); + list_move_tail(&w->sched.link, &list); + } + + rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); + } while (rq); +} + +static void defer_active(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = __unwind_incomplete_requests(engine); + if (!rq) + return; + + defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq))); +} + +static bool +need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) +{ + int hint; + + if (!intel_engine_has_timeslices(engine)) + return false; + + if (list_is_last(&rq->sched.link, &engine->active.requests)) + return false; + + hint = max(rq_prio(list_next_entry(rq, sched.link)), + engine->execlists.queue_priority_hint); + + return hint >= effective_prio(rq); +} + +static int +switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq) +{ + if (list_is_last(&rq->sched.link, &engine->active.requests)) + return INT_MIN; + + return rq_prio(list_next_entry(rq, sched.link)); +} + +static inline unsigned long +timeslice(const struct intel_engine_cs *engine) +{ + return READ_ONCE(engine->props.timeslice_duration_ms); +} + +static unsigned long +active_timeslice(const struct intel_engine_cs *engine) +{ + const struct i915_request *rq = *engine->execlists.active; + + if (i915_request_completed(rq)) + return 0; + + if (engine->execlists.switch_priority_hint < effective_prio(rq)) + return 0; + + return timeslice(engine); +} + +static void set_timeslice(struct intel_engine_cs *engine) +{ + if (!intel_engine_has_timeslices(engine)) + return; + + set_timer_ms(&engine->execlists.timer, active_timeslice(engine)); +} + +static void record_preemption(struct intel_engine_execlists *execlists) +{ + (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); +} + +static unsigned long active_preempt_timeout(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = last_active(&engine->execlists); + if (!rq) + return 0; + + /* Force a fast reset for terminated contexts (ignoring sysfs!) */ + if (unlikely(i915_gem_context_is_banned(rq->gem_context))) + return 1; + + return READ_ONCE(engine->props.preempt_timeout_ms); +} + +static void set_preempt_timeout(struct intel_engine_cs *engine) +{ + if (!intel_engine_has_preempt_reset(engine)) + return; + + set_timer_ms(&engine->execlists.preempt, + active_preempt_timeout(engine)); +} + +static void execlists_dequeue(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request **port = execlists->pending; + struct i915_request ** const last_port = port + execlists->port_mask; + struct i915_request *last; + struct rb_node *rb; + bool submit = false; + + /* + * Hardware submission is through 2 ports. Conceptually each port + * has a (RING_START, RING_HEAD, RING_TAIL) tuple. 
RING_START is + * static for a context, and unique to each, so we only execute + * requests belonging to a single context from each ring. RING_HEAD + * is maintained by the CS in the context image, it marks the place + * where it got up to last time, and through RING_TAIL we tell the CS + * where we want to execute up to this time. + * + * In this list the requests are in order of execution. Consecutive + * requests from the same context are adjacent in the ringbuffer. We + * can combine these requests into a single RING_TAIL update: + * + * RING_HEAD...req1...req2 + * ^- RING_TAIL + * since to execute req2 the CS must first execute req1. + * + * Our goal then is to point each port to the end of a consecutive + * sequence of requests as being the most optimal (fewest wake ups + * and context switches) submission. + */ + + for (rb = rb_first_cached(&execlists->virtual); rb; ) { + struct virtual_engine *ve = + rb_entry(rb, typeof(*ve), nodes[engine->id].rb); + struct i915_request *rq = READ_ONCE(ve->request); + + if (!rq) { /* lazily cleanup after another engine handled rq */ + rb_erase_cached(rb, &execlists->virtual); + RB_CLEAR_NODE(rb); + rb = rb_first_cached(&execlists->virtual); + continue; + } + + if (!virtual_matches(ve, rq, engine)) { + rb = rb_next(rb); + continue; + } + + break; + } + + /* + * If the queue is higher priority than the last + * request in the currently active context, submit afresh. + * We will resubmit again afterwards in case we need to split + * the active context to interject the preemption request, + * i.e. we will retrigger preemption following the ack in case + * of trouble. + */ + last = last_active(execlists); + if (last) { + if (need_preempt(engine, last, rb)) { + GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n", + engine->name, + last->fence.context, + last->fence.seqno, + last->sched.attr.priority, + execlists->queue_priority_hint); + record_preemption(execlists); + + /* + * Don't let the RING_HEAD advance past the breadcrumb + * as we unwind (and until we resubmit) so that we do + * not accidentally tell it to go backwards. + */ + ring_set_paused(engine, 1); + + /* + * Note that we have not stopped the GPU at this point, + * so we are unwinding the incomplete requests as they + * remain inflight and so by the time we do complete + * the preemption, some of the unwound requests may + * complete! + */ + __unwind_incomplete_requests(engine); + + /* + * If we need to return to the preempted context, we + * need to skip the lite-restore and force it to + * reload the RING_TAIL. Otherwise, the HW has a + * tendency to ignore us rewinding the TAIL to the + * end of an earlier request. + */ + last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE; + last = NULL; + } else if (need_timeslice(engine, last) && + timer_expired(&engine->execlists.timer)) { + GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n", + engine->name, + last->fence.context, + last->fence.seqno, + last->sched.attr.priority, + execlists->queue_priority_hint); + + ring_set_paused(engine, 1); + defer_active(engine); + + /* + * Unlike for preemption, if we rewind and continue + * executing the same context as previously active, + * the order of execution will remain the same and + * the tail will only advance. We do not need to + * force a full context restore, as a lite-restore + * is sufficient to resample the monotonic TAIL. 
+ * + * If we switch to any other context, similarly we + * will not rewind TAIL of current context, and + * normal save/restore will preserve state and allow + * us to later continue executing the same request. + */ + last = NULL; + } else { + /* + * Otherwise if we already have a request pending + * for execution after the current one, we can + * just wait until the next CS event before + * queuing more. In either case we will force a + * lite-restore preemption event, but if we wait + * we hopefully coalesce several updates into a single + * submission. + */ + if (!list_is_last(&last->sched.link, + &engine->active.requests)) { + /* + * Even if ELSP[1] is occupied and not worthy + * of timeslices, our queue might be. + */ + if (!execlists->timer.expires && + need_timeslice(engine, last)) + set_timer_ms(&execlists->timer, + timeslice(engine)); + + return; + } + + /* + * WaIdleLiteRestore:bdw,skl + * Apply the wa NOOPs to prevent + * ring:HEAD == rq:TAIL as we resubmit the + * request. See gen8_emit_fini_breadcrumb() for + * where we prepare the padding after the + * end of the request. + */ + last->tail = last->wa_tail; + } + } + + while (rb) { /* XXX virtual is always taking precedence */ + struct virtual_engine *ve = + rb_entry(rb, typeof(*ve), nodes[engine->id].rb); + struct i915_request *rq; + + spin_lock(&ve->base.active.lock); + + rq = ve->request; + if (unlikely(!rq)) { /* lost the race to a sibling */ + spin_unlock(&ve->base.active.lock); + rb_erase_cached(rb, &execlists->virtual); + RB_CLEAR_NODE(rb); + rb = rb_first_cached(&execlists->virtual); + continue; + } + + GEM_BUG_ON(rq != ve->request); + GEM_BUG_ON(rq->engine != &ve->base); + GEM_BUG_ON(rq->hw_context != &ve->context); + + if (rq_prio(rq) >= queue_prio(execlists)) { + if (!virtual_matches(ve, rq, engine)) { + spin_unlock(&ve->base.active.lock); + rb = rb_next(rb); + continue; + } + + if (last && !can_merge_rq(last, rq)) { + spin_unlock(&ve->base.active.lock); + return; /* leave this for another */ + } + + GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n", + engine->name, + rq->fence.context, + rq->fence.seqno, + i915_request_completed(rq) ? "!" : + i915_request_started(rq) ? "*" : + "", + yesno(engine != ve->siblings[0])); + + ve->request = NULL; + ve->base.execlists.queue_priority_hint = INT_MIN; + rb_erase_cached(rb, &execlists->virtual); + RB_CLEAR_NODE(rb); + + GEM_BUG_ON(!(rq->execution_mask & engine->mask)); + rq->engine = engine; + + if (engine != ve->siblings[0]) { + u32 *regs = ve->context.lrc_reg_state; + unsigned int n; + + GEM_BUG_ON(READ_ONCE(ve->context.inflight)); + + if (!intel_engine_has_relative_mmio(engine)) + virtual_update_register_offsets(regs, + engine); + + if (!list_empty(&ve->context.signals)) + virtual_xfer_breadcrumbs(ve, engine); + + /* + * Move the bound engine to the top of the list + * for future execution. We then kick this + * tasklet first before checking others, so that + * we preferentially reuse this set of bound + * registers. + */ + for (n = 1; n < ve->num_siblings; n++) { + if (ve->siblings[n] == engine) { + swap(ve->siblings[n], + ve->siblings[0]); + break; + } + } + + GEM_BUG_ON(ve->siblings[0] != engine); + } + + if (__i915_request_submit(rq)) { + submit = true; + last = rq; + } + i915_request_put(rq); + + /* + * Hmm, we have a bunch of virtual engine requests, + * but the first one was already completed (thanks + * preempt-to-busy!). Keep looking at the veng queue + * until we have no more relevant requests (i.e. + * the normal submit queue has higher priority). 
+ */ + if (!submit) { + spin_unlock(&ve->base.active.lock); + rb = rb_first_cached(&execlists->virtual); + continue; + } + } + + spin_unlock(&ve->base.active.lock); + break; + } + + while ((rb = rb_first_cached(&execlists->queue))) { + struct i915_priolist *p = to_priolist(rb); + struct i915_request *rq, *rn; + int i; + + priolist_for_each_request_consume(rq, rn, p, i) { + bool merge = true; + + /* + * Can we combine this request with the current port? + * It has to be the same context/ringbuffer and not + * have any exceptions (e.g. GVT saying never to + * combine contexts). + * + * If we can combine the requests, we can execute both + * by updating the RING_TAIL to point to the end of the + * second request, and so we never need to tell the + * hardware about the first. + */ + if (last && !can_merge_rq(last, rq)) { + /* + * If we are on the second port and cannot + * combine this request with the last, then we + * are done. + */ + if (port == last_port) + goto done; + + /* + * We must not populate both ELSP[] with the + * same LRCA, i.e. we must submit 2 different + * contexts if we submit 2 ELSP. + */ + if (last->hw_context == rq->hw_context) + goto done; + + if (i915_request_has_sentinel(last)) + goto done; + + /* + * If GVT overrides us we only ever submit + * port[0], leaving port[1] empty. Note that we + * also have to be careful that we don't queue + * the same context (even though a different + * request) to the second port. + */ + if (ctx_single_port_submission(last->hw_context) || + ctx_single_port_submission(rq->hw_context)) + goto done; + + merge = false; + } + + if (__i915_request_submit(rq)) { + if (!merge) { + *port = execlists_schedule_in(last, port - execlists->pending); + port++; + last = NULL; + } + + GEM_BUG_ON(last && + !can_merge_ctx(last->hw_context, + rq->hw_context)); + + submit = true; + last = rq; + } + } + + rb_erase_cached(&p->node, &execlists->queue); + i915_priolist_free(p); + } + +done: + /* + * Here be a bit of magic! Or sleight-of-hand, whichever you prefer. + * + * We choose the priority hint such that if we add a request of greater + * priority than this, we kick the submission tasklet to decide on + * the right order of submitting the requests to hardware. We must + * also be prepared to reorder requests as they are in-flight on the + * HW. We derive the priority hint then as the first "hole" in + * the HW submission ports and if there are no available slots, + * the priority of the lowest executing request, i.e. last. + * + * When we do receive a higher priority request ready to run from the + * user, see queue_request(), the priority hint is bumped to that + * request triggering preemption on the next dequeue (or subsequent + * interrupt for secondary ports). + */ + execlists->queue_priority_hint = queue_prio(execlists); + GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n", + engine->name, execlists->queue_priority_hint, + yesno(submit)); + + if (submit) { + *port = execlists_schedule_in(last, port - execlists->pending); + execlists->switch_priority_hint = + switch_prio(engine, *execlists->pending); + + /* + * Skip if we ended up with exactly the same set of requests, + * e.g. 
trying to timeslice a pair of ordered contexts + */ + if (!memcmp(execlists->active, execlists->pending, + (port - execlists->pending + 1) * sizeof(*port))) { + do + execlists_schedule_out(fetch_and_zero(port)); + while (port-- != execlists->pending); + + goto skip_submit; + } + + memset(port + 1, 0, (last_port - port) * sizeof(*port)); + execlists_submit_ports(engine); + + set_preempt_timeout(engine); + } else { +skip_submit: + ring_set_paused(engine, 0); + } +} + +static void +cancel_port_requests(struct intel_engine_execlists * const execlists) +{ + struct i915_request * const *port, *rq; + + for (port = execlists->pending; (rq = *port); port++) + execlists_schedule_out(rq); + memset(execlists->pending, 0, sizeof(execlists->pending)); + + for (port = execlists->active; (rq = *port); port++) + execlists_schedule_out(rq); + execlists->active = + memset(execlists->inflight, 0, sizeof(execlists->inflight)); +} + +static inline void +invalidate_csb_entries(const u32 *first, const u32 *last) +{ + clflush((void *)first); + clflush((void *)last); +} + +static inline bool +reset_in_progress(const struct intel_engine_execlists *execlists) +{ + return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); +} + +/* + * Starting with Gen12, the status has a new format: + * + * bit 0: switched to new queue + * bit 1: reserved + * bit 2: semaphore wait mode (poll or signal), only valid when + * switch detail is set to "wait on semaphore" + * bits 3-5: engine class + * bits 6-11: engine instance + * bits 12-14: reserved + * bits 15-25: sw context id of the lrc the GT switched to + * bits 26-31: sw counter of the lrc the GT switched to + * bits 32-35: context switch detail + * - 0: ctx complete + * - 1: wait on sync flip + * - 2: wait on vblank + * - 3: wait on scanline + * - 4: wait on semaphore + * - 5: context preempted (not on SEMAPHORE_WAIT or + * WAIT_FOR_EVENT) + * bit 36: reserved + * bits 37-43: wait detail (for switch detail 1 to 4) + * bits 44-46: reserved + * bits 47-57: sw context id of the lrc the GT switched away from + * bits 58-63: sw counter of the lrc the GT switched away from + */ +static inline bool +gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) +{ + u32 lower_dw = csb[0]; + u32 upper_dw = csb[1]; + bool ctx_to_valid = GEN12_CSB_CTX_VALID(lower_dw); + bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw); + bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE; + + /* + * The context switch detail is not guaranteed to be 5 when a preemption + * occurs, so we can't just check for that. The check below works for + * all the cases we care about, including preemptions of WAIT + * instructions and lite-restore. Preempt-to-idle via the CTRL register + * would require some extra handling, but we don't support that. + */ + if (!ctx_away_valid || new_queue) { + GEM_BUG_ON(!ctx_to_valid); + return true; + } + + /* + * switch detail = 5 is covered by the case above and we do not expect a + * context switch on an unsuccessful wait instruction since we always + * use polling mode. 
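[Sketch] The Gen12 CSB layout documented above can be unpacked with ordinary shifts and masks. The standalone decoder below takes its field positions directly from that comment; the helper names and the sample values are illustrative, not the driver's GEN12_CSB_* macros.

#include <stdint.h>
#include <stdio.h>

/* Extract bits [lo, hi] of a 64-bit CSB entry. */
static uint64_t csb_bits(uint64_t csb, unsigned int lo, unsigned int hi)
{
	return (csb >> lo) & ((1ull << (hi - lo + 1)) - 1);
}

static void decode_gen12_csb(uint32_t lower_dw, uint32_t upper_dw)
{
	uint64_t csb = ((uint64_t)upper_dw << 32) | lower_dw;

	printf("switched to new queue: %llu\n",
	       (unsigned long long)csb_bits(csb, 0, 0));
	printf("engine class/instance: %llu/%llu\n",
	       (unsigned long long)csb_bits(csb, 3, 5),
	       (unsigned long long)csb_bits(csb, 6, 11));
	printf("ctx switched to:   id=%llu counter=%llu\n",
	       (unsigned long long)csb_bits(csb, 15, 25),
	       (unsigned long long)csb_bits(csb, 26, 31));
	printf("switch detail:     %llu\n",
	       (unsigned long long)csb_bits(csb, 32, 35));
	printf("ctx switched away: id=%llu counter=%llu\n",
	       (unsigned long long)csb_bits(csb, 47, 57),
	       (unsigned long long)csb_bits(csb, 58, 63));
}

int main(void)
{
	/* Arbitrary example values, not captured hardware output. */
	decode_gen12_csb(0x04008001u, 0x00000000u);
	return 0;
}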
+ */ + GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw)); + return false; +} + +static inline bool +gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) +{ + return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); +} + +static void process_csb(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + const u32 * const buf = execlists->csb_status; + const u8 num_entries = execlists->csb_size; + u8 head, tail; + + /* + * As we modify our execlists state tracking we require exclusive + * access. Either we are inside the tasklet, or the tasklet is disabled + * and we assume that is only inside the reset paths and so serialised. + */ + GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) && + !reset_in_progress(execlists)); + GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine)); + + /* + * Note that csb_write, csb_status may be either in HWSP or mmio. + * When reading from the csb_write mmio register, we have to be + * careful to only use the GEN8_CSB_WRITE_PTR portion, which is + * the low 4bits. As it happens we know the next 4bits are always + * zero and so we can simply masked off the low u8 of the register + * and treat it identically to reading from the HWSP (without having + * to use explicit shifting and masking, and probably bifurcating + * the code to handle the legacy mmio read). + */ + head = execlists->csb_head; + tail = READ_ONCE(*execlists->csb_write); + GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail); + if (unlikely(head == tail)) + return; + + /* + * Hopefully paired with a wmb() in HW! + * + * We must complete the read of the write pointer before any reads + * from the CSB, so that we do not see stale values. Without an rmb + * (lfence) the HW may speculatively perform the CSB[] reads *before* + * we perform the READ_ONCE(*csb_write). + */ + rmb(); + + do { + bool promote; + + if (++head == num_entries) + head = 0; + + /* + * We are flying near dragons again. + * + * We hold a reference to the request in execlist_port[] + * but no more than that. We are operating in softirq + * context and so cannot hold any mutex or sleep. That + * prevents us stopping the requests we are processing + * in port[] from being retired simultaneously (the + * breadcrumb will be complete before we see the + * context-switch). As we only hold the reference to the + * request, any pointer chasing underneath the request + * is subject to a potential use-after-free. Thus we + * store all of the bookkeeping within port[] as + * required, and avoid using unguarded pointers beneath + * request itself. The same applies to the atomic + * status notifier. 
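[Sketch] process_csb() above walks the status buffer as a single-producer ring: read the producer's write index once, then consume entries one at a time, wrapping head back to 0 at csb_size. A standalone model of just that index arithmetic is shown below (no hardware, no barriers; the rmb() in the real code is what makes the read ordering safe). NUM_ENTRIES is an assumed value.

#include <stdint.h>
#include <stdio.h>

#define NUM_ENTRIES 12	/* assumed; e.g. GEN11_CSB_ENTRIES */

/* Consume entries (head, tail], wrapping at NUM_ENTRIES, and return the
 * new head so the caller can store it back into execlists->csb_head. */
static unsigned int consume_csb(uint32_t (*buf)[2],
				unsigned int head, unsigned int tail)
{
	if (head == tail)
		return head;	/* nothing new */

	do {
		if (++head == NUM_ENTRIES)
			head = 0;

		printf("csb[%u]: 0x%08x:0x%08x\n",
		       head, buf[head][0], buf[head][1]);
	} while (head != tail);

	return head;
}

int main(void)
{
	uint32_t buf[NUM_ENTRIES][2] = {{0}};
	unsigned int head = NUM_ENTRIES - 1;	/* post-reset, see reset_csb_pointers() */

	buf[0][0] = 0x1;	/* pretend the HW reported one event */
	head = consume_csb(buf, head, 0);
	printf("new head = %u\n", head);
	return 0;
}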
+ */ + + GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n", + engine->name, head, + buf[2 * head + 0], buf[2 * head + 1]); + + if (INTEL_GEN(engine->i915) >= 12) + promote = gen12_csb_parse(execlists, buf + 2 * head); + else + promote = gen8_csb_parse(execlists, buf + 2 * head); + if (promote) { + if (!inject_preempt_hang(execlists)) + ring_set_paused(engine, 0); + + /* cancel old inflight, prepare for switch */ + trace_ports(execlists, "preempted", execlists->active); + while (*execlists->active) + execlists_schedule_out(*execlists->active++); + + /* switch pending to inflight */ + GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); + execlists->active = + memcpy(execlists->inflight, + execlists->pending, + execlists_num_ports(execlists) * + sizeof(*execlists->pending)); + + set_timeslice(engine); + + WRITE_ONCE(execlists->pending[0], NULL); + } else { + GEM_BUG_ON(!*execlists->active); + + /* port0 completed, advanced to port1 */ + trace_ports(execlists, "completed", execlists->active); + + /* + * We rely on the hardware being strongly + * ordered, that the breadcrumb write is + * coherent (visible from the CPU) before the + * user interrupt and CSB is processed. + */ + GEM_BUG_ON(!i915_request_completed(*execlists->active) && + !reset_in_progress(execlists)); + execlists_schedule_out(*execlists->active++); + + GEM_BUG_ON(execlists->active - execlists->inflight > + execlists_num_ports(execlists)); + } + } while (head != tail); + + execlists->csb_head = head; + + /* + * Gen11 has proven to fail wrt global observation point between + * entry and tail update, failing on the ordering and thus + * we see an old entry in the context status buffer. + * + * Forcibly evict out entries for the next gpu csb update, + * to increase the odds that we get a fresh entries with non + * working hardware. The cost for doing so comes out mostly with + * the wash as hardware, working or not, will need to do the + * invalidation before. + */ + invalidate_csb_entries(&buf[0], &buf[num_entries - 1]); +} + +static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) +{ + lockdep_assert_held(&engine->active.lock); + if (!engine->execlists.pending[0]) { + rcu_read_lock(); /* protect peeking at execlists->active */ + execlists_dequeue(engine); + rcu_read_unlock(); + } +} + +static noinline void preempt_reset(struct intel_engine_cs *engine) +{ + const unsigned int bit = I915_RESET_ENGINE + engine->id; + unsigned long *lock = &engine->gt->reset.flags; + + if (i915_modparams.reset < 3) + return; + + if (test_and_set_bit(bit, lock)) + return; + + /* Mark this tasklet as disabled to avoid waiting for it to complete */ + tasklet_disable_nosync(&engine->execlists.tasklet); + + GEM_TRACE("%s: preempt timeout %lu+%ums\n", + engine->name, + READ_ONCE(engine->props.preempt_timeout_ms), + jiffies_to_msecs(jiffies - engine->execlists.preempt.expires)); + intel_engine_reset(engine, "preemption time out"); + + tasklet_enable(&engine->execlists.tasklet); + clear_and_wake_up_bit(bit, lock); +} + +static bool preempt_timeout(const struct intel_engine_cs *const engine) +{ + const struct timer_list *t = &engine->execlists.preempt; + + if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) + return false; + + if (!timer_expired(t)) + return false; + + return READ_ONCE(engine->execlists.pending[0]); +} + +/* + * Check the unread Context Status Buffers and manage the submission of new + * contexts to the ELSP accordingly. 
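[Sketch] The promote/complete branches above amount to bookkeeping over two small NULL-terminated port arrays: on a promotion event the pending set becomes the inflight set and the old inflight entries are scheduled out, while on a completion event the active cursor simply advances. A simplified model of that state machine follows; struct sketch_execlists and the string "requests" are stand-ins for the real struct i915_request ports.

#include <stdio.h>
#include <string.h>

#define NUM_PORTS 2

struct sketch_execlists {
	const char *inflight[NUM_PORTS + 1];	/* NULL terminated */
	const char *pending[NUM_PORTS + 1];
	const char **active;			/* cursor into inflight[] */
};

static void promote(struct sketch_execlists *el)
{
	/* cancel whatever was still marked as executing */
	while (*el->active)
		printf("schedule_out(%s)\n", *el->active++);

	/* the pending submission becomes the executing one */
	memcpy(el->inflight, el->pending, sizeof(el->pending));
	el->active = el->inflight;
	el->pending[0] = NULL;
}

static void complete(struct sketch_execlists *el)
{
	printf("schedule_out(%s)\n", *el->active++);
}

int main(void)
{
	struct sketch_execlists el = {
		.inflight = { "rq-A", NULL },
		.pending = { "rq-B", "rq-C", NULL },
	};
	el.active = el.inflight;

	promote(&el);	/* rq-A out; rq-B/rq-C now inflight */
	complete(&el);	/* rq-B finished, advance to rq-C */
	return 0;
}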
+ */ +static void execlists_submission_tasklet(unsigned long data) +{ + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + bool timeout = preempt_timeout(engine); + + process_csb(engine); + if (!READ_ONCE(engine->execlists.pending[0]) || timeout) { + unsigned long flags; + + spin_lock_irqsave(&engine->active.lock, flags); + __execlists_submission_tasklet(engine); + spin_unlock_irqrestore(&engine->active.lock, flags); + + /* Recheck after serialising with direct-submission */ + if (timeout && preempt_timeout(engine)) + preempt_reset(engine); + } +} + +static void __execlists_kick(struct intel_engine_execlists *execlists) +{ + /* Kick the tasklet for some interrupt coalescing and reset handling */ + tasklet_hi_schedule(&execlists->tasklet); +} + +#define execlists_kick(t, member) \ + __execlists_kick(container_of(t, struct intel_engine_execlists, member)) + +static void execlists_timeslice(struct timer_list *timer) +{ + execlists_kick(timer, timer); +} + +static void execlists_preempt(struct timer_list *timer) +{ + execlists_kick(timer, preempt); +} + +static void queue_request(struct intel_engine_cs *engine, + struct i915_sched_node *node, + int prio) +{ + GEM_BUG_ON(!list_empty(&node->link)); + list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio)); +} + +static void __submit_queue_imm(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + if (reset_in_progress(execlists)) + return; /* defer until we restart the engine following reset */ + + if (execlists->tasklet.func == execlists_submission_tasklet) + __execlists_submission_tasklet(engine); + else + tasklet_hi_schedule(&execlists->tasklet); +} + +static void submit_queue(struct intel_engine_cs *engine, + const struct i915_request *rq) +{ + struct intel_engine_execlists *execlists = &engine->execlists; + + if (rq_prio(rq) <= execlists->queue_priority_hint) + return; + + execlists->queue_priority_hint = rq_prio(rq); + __submit_queue_imm(engine); +} + +static void execlists_submit_request(struct i915_request *request) +{ + struct intel_engine_cs *engine = request->engine; + unsigned long flags; + + /* Will be called from irq-context when using foreign fences. 
*/ + spin_lock_irqsave(&engine->active.lock, flags); + + queue_request(engine, &request->sched, rq_prio(request)); + + GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); + GEM_BUG_ON(list_empty(&request->sched.link)); + + submit_queue(engine, request); + + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +static void __execlists_context_fini(struct intel_context *ce) +{ + intel_ring_put(ce->ring); + i915_vma_put(ce->state); +} + +static void execlists_context_destroy(struct kref *kref) +{ + struct intel_context *ce = container_of(kref, typeof(*ce), ref); + + GEM_BUG_ON(!i915_active_is_idle(&ce->active)); + GEM_BUG_ON(intel_context_is_pinned(ce)); + + if (ce->state) + __execlists_context_fini(ce); + + intel_context_fini(ce); + intel_context_free(ce); +} + +static void +set_redzone(void *vaddr, const struct intel_engine_cs *engine) +{ + if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + return; + + vaddr += engine->context_size; + + memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE); +} + +static void +check_redzone(const void *vaddr, const struct intel_engine_cs *engine) +{ + if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + return; + + vaddr += engine->context_size; + + if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) + dev_err_once(engine->i915->drm.dev, + "%s context redzone overwritten!\n", + engine->name); +} + +static void execlists_context_unpin(struct intel_context *ce) +{ + check_redzone((void *)ce->lrc_reg_state - LRC_STATE_PN * PAGE_SIZE, + ce->engine); + + i915_gem_object_unpin_map(ce->state->obj); + intel_ring_reset(ce->ring, ce->ring->tail); +} + +static void +__execlists_update_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine) +{ + struct intel_ring *ring = ce->ring; + u32 *regs = ce->lrc_reg_state; + + GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); + GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); + + regs[CTX_RING_BUFFER_START] = i915_ggtt_offset(ring->vma); + regs[CTX_RING_HEAD] = ring->head; + regs[CTX_RING_TAIL] = ring->tail; + + /* RPCS */ + if (engine->class == RENDER_CLASS) { + regs[CTX_R_PWR_CLK_STATE] = + intel_sseu_make_rpcs(engine->i915, &ce->sseu); + + i915_oa_init_reg_state(ce, engine); + } +} + +static int +__execlists_context_pin(struct intel_context *ce, + struct intel_engine_cs *engine) +{ + void *vaddr; + int ret; + + GEM_BUG_ON(!ce->state); + + ret = intel_context_active_acquire(ce); + if (ret) + goto err; + GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); + + vaddr = i915_gem_object_pin_map(ce->state->obj, + i915_coherent_map_type(engine->i915) | + I915_MAP_OVERRIDE); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto unpin_active; + } + + ce->lrc_desc = lrc_descriptor(ce, engine); + ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; + __execlists_update_reg_state(ce, engine); + + return 0; + +unpin_active: + intel_context_active_release(ce); +err: + return ret; +} + +static int execlists_context_pin(struct intel_context *ce) +{ + return __execlists_context_pin(ce, ce->engine); +} + +static int execlists_context_alloc(struct intel_context *ce) +{ + return __execlists_context_alloc(ce, ce->engine); +} + +static void execlists_context_reset(struct intel_context *ce) +{ + /* + * Because we emit WA_TAIL_DWORDS there may be a disparity + * between our bookkeeping in ce->ring->head and ce->ring->tail and + * that stored in context. As we only write new commands from + * ce->ring->tail onwards, everything before that is junk. 
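[Sketch] set_redzone()/check_redzone() above guard the page placed after the context image: it is filled with a known poison byte when the state is allocated and verified when the context is unpinned. A self-contained version of the same guard pattern is sketched below; CONTEXT_SIZE, REDZONE_SIZE and POISON_BYTE are placeholders for engine->context_size, I915_GTT_PAGE_SIZE and POISON_INUSE.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CONTEXT_SIZE	4096	/* stand-in for engine->context_size */
#define REDZONE_SIZE	4096	/* stand-in for I915_GTT_PAGE_SIZE */
#define POISON_BYTE	0x5a	/* stand-in for POISON_INUSE */

static void set_redzone(void *vaddr)
{
	memset((char *)vaddr + CONTEXT_SIZE, POISON_BYTE, REDZONE_SIZE);
}

/* Returns 0 when the guard is intact, -1 when something scribbled on it
 * (the driver reports that case with dev_err_once()). */
static int check_redzone(const void *vaddr)
{
	const unsigned char *rz = (const unsigned char *)vaddr + CONTEXT_SIZE;
	size_t i;

	for (i = 0; i < REDZONE_SIZE; i++) {
		if (rz[i] != POISON_BYTE)
			return -1;	/* memchr_inv() analogue */
	}

	return 0;
}

int main(void)
{
	char *state = malloc(CONTEXT_SIZE + REDZONE_SIZE);

	if (!state)
		return 1;

	set_redzone(state);
	state[CONTEXT_SIZE + 8] = 0;	/* simulate an overrun */

	printf("redzone %s\n", check_redzone(state) ? "overwritten!" : "intact");
	free(state);
	return 0;
}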
If the GPU + * starts reading from its RING_HEAD from the context, it may try to + * execute that junk and die. + * + * The contexts that are stilled pinned on resume belong to the + * kernel, and are local to each engine. All other contexts will + * have their head/tail sanitized upon pinning before use, so they + * will never see garbage, + * + * So to avoid that we reset the context images upon resume. For + * simplicity, we just zero everything out. + */ + intel_ring_reset(ce->ring, 0); + __execlists_update_reg_state(ce, ce->engine); +} + +static const struct intel_context_ops execlists_context_ops = { + .alloc = execlists_context_alloc, + + .pin = execlists_context_pin, + .unpin = execlists_context_unpin, + + .enter = intel_context_enter_engine, + .exit = intel_context_exit_engine, + + .reset = execlists_context_reset, + .destroy = execlists_context_destroy, +}; + +static int gen8_emit_init_breadcrumb(struct i915_request *rq) +{ + u32 *cs; + + GEM_BUG_ON(!i915_request_timeline(rq)->has_initial_breadcrumb); + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* + * Check if we have been preempted before we even get started. + * + * After this point i915_request_started() reports true, even if + * we get preempted and so are no longer running. + */ + *cs++ = MI_ARB_CHECK; + *cs++ = MI_NOOP; + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_request_timeline(rq)->hwsp_offset; + *cs++ = 0; + *cs++ = rq->fence.seqno - 1; + + intel_ring_advance(rq, cs); + + /* Record the updated position of the request's payload */ + rq->infix = intel_ring_offset(rq, cs); + + return 0; +} + +static int execlists_request_alloc(struct i915_request *request) +{ + int ret; + + GEM_BUG_ON(!intel_context_is_pinned(request->hw_context)); + + /* + * Flush enough space to reduce the likelihood of waiting after + * we start building the request - in which case we will just + * have to repeat work. + */ + request->reserved_space += EXECLISTS_REQUEST_SIZE; + + /* + * Note that after this point, we have committed to using + * this request as it is being used to both track the + * state of engine initialisation and liveness of the + * golden renderstate above. Think twice before you try + * to cancel/unwind this request now. + */ + + /* Unconditionally invalidate GPU caches and TLBs. */ + ret = request->engine->emit_flush(request, EMIT_INVALIDATE); + if (ret) + return ret; + + request->reserved_space -= EXECLISTS_REQUEST_SIZE; + return 0; +} + +/* + * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after + * PIPE_CONTROL instruction. This is required for the flush to happen correctly + * but there is a slight complication as this is applied in WA batch where the + * values are only initialized once so we cannot take register value at the + * beginning and reuse it further; hence we save its value to memory, upload a + * constant value with bit21 set and then we restore it back with the saved value. + * To simplify the WA, a constant value is formed by using the default value + * of this register. This shouldn't be a problem because we are only modifying + * it for a short period and this batch in non-premptible. We can ofcourse + * use additional instructions that read the actual value of the register + * at that time and set our bit of interest but it makes the WA complicated. + * + * This WA is also required for Gen9 so extracting as a function avoids + * code duplication. 
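[Sketch] It is worth noting how gen8_emit_init_breadcrumb() above makes i915_request_started() observable: the initial breadcrumb stores seqno - 1 into the timeline's HWSP slot before the payload, and the fini breadcrumb later stores seqno itself. The sketch below models that convention with a wrap-safe comparison; the structure and values are hypothetical and only illustrate the ordering, not the driver's actual helpers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model: the HWSP slot holds the last seqno the GPU wrote
 * for this timeline. */
struct sketch_request {
	uint32_t seqno;		/* rq->fence.seqno */
	const uint32_t *hwsp;	/* hwsp_seqno() analogue */
};

static bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;	/* wrap-safe comparison */
}

/* The init breadcrumb stores seqno - 1 before the payload runs... */
static bool request_started(const struct sketch_request *rq)
{
	return seqno_passed(*rq->hwsp, rq->seqno - 1);
}

/* ...and the fini breadcrumb stores seqno after it. */
static bool request_completed(const struct sketch_request *rq)
{
	return seqno_passed(*rq->hwsp, rq->seqno);
}

int main(void)
{
	uint32_t hwsp = 41;	/* previous request finished */
	struct sketch_request rq = { .seqno = 43, .hwsp = &hwsp };

	printf("started %d completed %d\n",
	       request_started(&rq), request_completed(&rq));
	hwsp = 42;	/* init breadcrumb (seqno - 1) lands */
	printf("started %d completed %d\n",
	       request_started(&rq), request_completed(&rq));
	hwsp = 43;	/* fini breadcrumb lands */
	printf("started %d completed %d\n",
	       request_started(&rq), request_completed(&rq));
	return 0;
}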
+ */ +static u32 * +gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) +{ + /* NB no one else is allowed to scribble over scratch + 256! */ + *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); + *batch++ = intel_gt_scratch_offset(engine->gt, + INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA); + *batch++ = 0; + + *batch++ = MI_LOAD_REGISTER_IMM(1); + *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); + *batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES; + + batch = gen8_emit_pipe_control(batch, + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_DC_FLUSH_ENABLE, + 0); + + *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); + *batch++ = intel_gt_scratch_offset(engine->gt, + INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA); + *batch++ = 0; + + return batch; +} + +/* + * Typically we only have one indirect_ctx and per_ctx batch buffer which are + * initialized at the beginning and shared across all contexts but this field + * helps us to have multiple batches at different offsets and select them based + * on a criteria. At the moment this batch always start at the beginning of the page + * and at this point we don't have multiple wa_ctx batch buffers. + * + * The number of WA applied are not known at the beginning; we use this field + * to return the no of DWORDS written. + * + * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END + * so it adds NOOPs as padding to make it cacheline aligned. + * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together + * makes a complete batch buffer. + */ +static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) +{ + /* WaDisableCtxRestoreArbitration:bdw,chv */ + *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + + /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ + if (IS_BROADWELL(engine->i915)) + batch = gen8_emit_flush_coherentl3_wa(engine, batch); + + /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ + /* Actual scratch location is at 128 bytes offset */ + batch = gen8_emit_pipe_control(batch, + PIPE_CONTROL_FLUSH_L3 | + PIPE_CONTROL_STORE_DATA_INDEX | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE, + LRC_PPHWSP_SCRATCH_ADDR); + + *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + + /* Pad to end of cacheline */ + while ((unsigned long)batch % CACHELINE_BYTES) + *batch++ = MI_NOOP; + + /* + * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because + * execution depends on the length specified in terms of cache lines + * in the register CTX_RCS_INDIRECT_CTX + */ + + return batch; +} + +struct lri { + i915_reg_t reg; + u32 value; +}; + +static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count) +{ + GEM_BUG_ON(!count || count > 63); + + *batch++ = MI_LOAD_REGISTER_IMM(count); + do { + *batch++ = i915_mmio_reg_offset(lri->reg); + *batch++ = lri->value; + } while (lri++, --count); + *batch++ = MI_NOOP; + + return batch; +} + +static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) +{ + static const struct lri lri[] = { + /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ + { + COMMON_SLICE_CHICKEN2, + __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE, + 0), + }, + + /* BSpec: 11391 */ + { + FF_SLICE_CHICKEN, + __MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX, + FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX), + }, + + /* BSpec: 11299 */ + { + _3D_CHICKEN3, + __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX, + 
_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX), + } + }; + + *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + + /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ + batch = gen8_emit_flush_coherentl3_wa(engine, batch); + + batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); + + /* WaMediaPoolStateCmdInWABB:bxt,glk */ + if (HAS_POOLED_EU(engine->i915)) { + /* + * EU pool configuration is setup along with golden context + * during context initialization. This value depends on + * device type (2x6 or 3x6) and needs to be updated based + * on which subslice is disabled especially for 2x6 + * devices, however it is safe to load default + * configuration of 3x6 device instead of masking off + * corresponding bits because HW ignores bits of a disabled + * subslice and drops down to appropriate config. Please + * see render_state_setup() in i915_gem_render_state.c for + * possible configurations, to avoid duplication they are + * not shown here again. + */ + *batch++ = GEN9_MEDIA_POOL_STATE; + *batch++ = GEN9_MEDIA_POOL_ENABLE; + *batch++ = 0x00777000; + *batch++ = 0; + *batch++ = 0; + *batch++ = 0; + } + + *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + + /* Pad to end of cacheline */ + while ((unsigned long)batch % CACHELINE_BYTES) + *batch++ = MI_NOOP; + + return batch; +} + +static u32 * +gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) +{ + int i; + + /* + * WaPipeControlBefore3DStateSamplePattern: cnl + * + * Ensure the engine is idle prior to programming a + * 3DSTATE_SAMPLE_PATTERN during a context restore. + */ + batch = gen8_emit_pipe_control(batch, + PIPE_CONTROL_CS_STALL, + 0); + /* + * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for + * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in + * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is + * confusing. Since gen8_emit_pipe_control() already advances the + * batch by 6 dwords, we advance the other 10 here, completing a + * cacheline. It's not clear if the workaround requires this padding + * before other commands, or if it's just the regular padding we would + * already have for the workaround bb, so leave it here for now. 
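[Sketch] The indirect-context builders above all finish with the same idiom: because the indirect batch length is programmed in whole cachelines (and carries no MI_BATCH_BUFFER_END), the tail is padded with MI_NOOPs up to the next cacheline boundary. A standalone illustration of that dword arithmetic follows, assuming CACHELINE_BYTES is 64 and a cacheline-aligned batch base.

#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES	64
#define MI_NOOP		0u

/* Pad a command stream out to the next cacheline boundary with NOOPs,
 * mirroring the loop used by the indirect-context batch builders
 * (equivalent to the driver's pointer-modulo test when the base is
 * cacheline aligned). */
static uint32_t *pad_to_cacheline(const uint32_t *base, uint32_t *batch)
{
	while (((uintptr_t)batch - (uintptr_t)base) % CACHELINE_BYTES)
		*batch++ = MI_NOOP;

	return batch;
}

int main(void)
{
	uint32_t buf[64] = { 0 };
	uint32_t *cs = buf;

	/* pretend we emitted 9 dwords (36 bytes) of workaround commands */
	cs += 9;
	cs = pad_to_cacheline(buf, cs);

	printf("emitted %zu dwords, %zu cacheline(s)\n",
	       (size_t)(cs - buf),
	       (size_t)((cs - buf) * sizeof(*buf) / CACHELINE_BYTES));
	return 0;
}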
+ */ + for (i = 0; i < 10; i++) + *batch++ = MI_NOOP; + + /* Pad to end of cacheline */ + while ((unsigned long)batch % CACHELINE_BYTES) + *batch++ = MI_NOOP; + + return batch; +} + +#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE) + +static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (err) + goto err; + + engine->wa_ctx.vma = vma; + return 0; + +err: + i915_gem_object_put(obj); + return err; +} + +static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine) +{ + i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); +} + +typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch); + +static int intel_init_workaround_bb(struct intel_engine_cs *engine) +{ + struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; + struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx, + &wa_ctx->per_ctx }; + wa_bb_func_t wa_bb_fn[2]; + struct page *page; + void *batch, *batch_ptr; + unsigned int i; + int ret; + + if (engine->class != RENDER_CLASS) + return 0; + + switch (INTEL_GEN(engine->i915)) { + case 12: + case 11: + return 0; + case 10: + wa_bb_fn[0] = gen10_init_indirectctx_bb; + wa_bb_fn[1] = NULL; + break; + case 9: + wa_bb_fn[0] = gen9_init_indirectctx_bb; + wa_bb_fn[1] = NULL; + break; + case 8: + wa_bb_fn[0] = gen8_init_indirectctx_bb; + wa_bb_fn[1] = NULL; + break; + default: + MISSING_CASE(INTEL_GEN(engine->i915)); + return 0; + } + + ret = lrc_setup_wa_ctx(engine); + if (ret) { + DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret); + return ret; + } + + page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0); + batch = batch_ptr = kmap_atomic(page); + + /* + * Emit the two workaround batch buffers, recording the offset from the + * start of the workaround batch buffer object for each and their + * respective sizes. 
+ */ + for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) { + wa_bb[i]->offset = batch_ptr - batch; + if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, + CACHELINE_BYTES))) { + ret = -EINVAL; + break; + } + if (wa_bb_fn[i]) + batch_ptr = wa_bb_fn[i](engine, batch_ptr); + wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset); + } + + BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE); + + kunmap_atomic(batch); + if (ret) + lrc_destroy_wa_ctx(engine); + + return ret; +} + +static void enable_execlists(struct intel_engine_cs *engine) +{ + u32 mode; + + assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); + + intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ + + if (INTEL_GEN(engine->i915) >= 11) + mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE); + else + mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE); + ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode); + + ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); + + ENGINE_WRITE_FW(engine, + RING_HWS_PGA, + i915_ggtt_offset(engine->status_page.vma)); + ENGINE_POSTING_READ(engine, RING_HWS_PGA); +} + +static bool unexpected_starting_state(struct intel_engine_cs *engine) +{ + bool unexpected = false; + + if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) { + DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n"); + unexpected = true; + } + + return unexpected; +} + +static int execlists_resume(struct intel_engine_cs *engine) +{ + intel_engine_apply_workarounds(engine); + intel_engine_apply_whitelist(engine); + + intel_mocs_init_engine(engine); + + intel_engine_reset_breadcrumbs(engine); + + if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) { + struct drm_printer p = drm_debug_printer(__func__); + + intel_engine_dump(engine, &p, NULL); + } + + enable_execlists(engine); + + return 0; +} + +static void execlists_reset_prepare(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + unsigned long flags; + + GEM_TRACE("%s: depth<-%d\n", engine->name, + atomic_read(&execlists->tasklet.count)); + + /* + * Prevent request submission to the hardware until we have + * completed the reset in i915_gem_reset_finish(). If a request + * is completed by one engine, it may then queue a request + * to a second via its execlists->tasklet *just* as we are + * calling engine->resume() and also writing the ELSP. + * Turning off the execlists->tasklet until the reset is over + * prevents the race. + */ + __tasklet_disable_sync_once(&execlists->tasklet); + GEM_BUG_ON(!reset_in_progress(execlists)); + + /* And flush any current direct submission. */ + spin_lock_irqsave(&engine->active.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); + + /* + * We stop engines, otherwise we might get failed reset and a + * dead gpu (on elk). Also as modern gpu as kbl can suffer + * from system hang if batchbuffer is progressing when + * the reset is issued, regardless of READY_TO_RESET ack. + * Thus assume it is best to stop engines on all gens + * where we have a gpu reset. + * + * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) + * + * FIXME: Wa for more modern gens needs to be validated + */ + intel_engine_stop_cs(engine); +} + +static void reset_csb_pointers(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + const unsigned int reset_value = execlists->csb_size - 1; + + ring_set_paused(engine, 0); + + /* + * After a reset, the HW starts writing into CSB entry [0]. 
We + * therefore have to set our HEAD pointer back one entry so that + * the *first* entry we check is entry 0. To complicate this further, + * as we don't wait for the first interrupt after reset, we have to + * fake the HW write to point back to the last entry so that our + * inline comparison of our cached head position against the last HW + * write works even before the first interrupt. + */ + execlists->csb_head = reset_value; + WRITE_ONCE(*execlists->csb_write, reset_value); + wmb(); /* Make sure this is visible to HW (paranoia?) */ + + invalidate_csb_entries(&execlists->csb_status[0], + &execlists->csb_status[reset_value]); +} + +static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) +{ + if (INTEL_GEN(engine->i915) >= 12) + return 0x60; + else if (INTEL_GEN(engine->i915) >= 9) + return 0x54; + else if (engine->class == RENDER_CLASS) + return 0x58; + else + return -1; +} + +static void __execlists_reset_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine) +{ + u32 *regs = ce->lrc_reg_state; + int x; + + x = lrc_ring_mi_mode(engine); + if (x != -1) { + regs[x + 1] &= ~STOP_RING; + regs[x + 1] |= STOP_RING << 16; + } +} + +static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct intel_context *ce; + struct i915_request *rq; + + mb(); /* paranoia: read the CSB pointers from after the reset */ + clflush(execlists->csb_write); + mb(); + + process_csb(engine); /* drain preemption events */ + + /* Following the reset, we need to reload the CSB read/write pointers */ + reset_csb_pointers(engine); + + /* + * Save the currently executing context, even if we completed + * its request, it was still running at the time of the + * reset and will have been clobbered. + */ + rq = execlists_active(execlists); + if (!rq) + goto unwind; + + /* We still have requests in-flight; the engine should be active */ + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); + + ce = rq->hw_context; + GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); + + if (i915_request_completed(rq)) { + /* Idle context; tidy up the ring so we can restart afresh */ + ce->ring->head = intel_ring_wrap(ce->ring, rq->tail); + goto out_replay; + } + + /* Context has requests still in-flight; it should not be idle! */ + GEM_BUG_ON(i915_active_is_idle(&ce->active)); + rq = active_request(ce->timeline, rq); + ce->ring->head = intel_ring_wrap(ce->ring, rq->head); + GEM_BUG_ON(ce->ring->head == ce->ring->tail); + + /* + * If this request hasn't started yet, e.g. it is waiting on a + * semaphore, we need to avoid skipping the request or else we + * break the signaling chain. However, if the context is corrupt + * the request will not restart and we will be stuck with a wedged + * device. It is quite often the case that if we issue a reset + * while the GPU is loading the context image, that the context + * image becomes corrupt. + * + * Otherwise, if we have not started yet, the request should replay + * perfectly and we do not need to flag the result as being erroneous. + */ + if (!i915_request_started(rq)) + goto out_replay; + + /* + * If the request was innocent, we leave the request in the ELSP + * and will try to replay it on restarting. The context image may + * have been corrupted by the reset, in which case we may have + * to service a new GPU hang, but more likely we can continue on + * without impact. 
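[Sketch] The two head rewinds above (replaying an incomplete request, or starting afresh after a completed one) rely on intel_ring_wrap() being a cheap mask, which works because ring sizes are powers of two. A hedged sketch of that wrap, with arbitrary example offsets:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_ring {
	uint32_t size;	/* bytes; i915 rings are power-of-two sized */
	uint32_t head;
	uint32_t tail;
};

/* intel_ring_wrap() analogue: clamp an offset into the ring. */
static uint32_t ring_wrap(const struct sketch_ring *ring, uint32_t pos)
{
	assert((ring->size & (ring->size - 1)) == 0);
	return pos & (ring->size - 1);
}

int main(void)
{
	struct sketch_ring ring = { .size = 16 * 1024 };
	uint32_t rq_head = 0x3f00, rq_tail = 0x4040;	/* arbitrary offsets */

	/* incomplete request: restart from its head so it replays */
	ring.head = ring_wrap(&ring, rq_head);
	printf("replay from head=0x%04x\n", (unsigned)ring.head);

	/* completed request: tidy up and start afresh after its tail */
	ring.head = ring_wrap(&ring, rq_tail);
	printf("restart at head=0x%04x\n", (unsigned)ring.head);
	return 0;
}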
+ * + * If the request was guilty, we presume the context is corrupt + * and have to at least restore the RING register in the context + * image back to the expected values to skip over the guilty request. + */ + __i915_request_reset(rq, stalled); + if (!stalled) + goto out_replay; + + /* + * We want a simple context + ring to execute the breadcrumb update. + * We cannot rely on the context being intact across the GPU hang, + * so clear it and rebuild just what we need for the breadcrumb. + * All pending requests for this context will be zapped, and any + * future request will be after userspace has had the opportunity + * to recreate its own state. + */ + GEM_BUG_ON(!intel_context_is_pinned(ce)); + restore_default_state(ce, engine); + +out_replay: + GEM_TRACE("%s replay {head:%04x, tail:%04x}\n", + engine->name, ce->ring->head, ce->ring->tail); + intel_ring_update_space(ce->ring); + __execlists_reset_reg_state(ce, engine); + __execlists_update_reg_state(ce, engine); + ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */ + +unwind: + /* Push back any incomplete requests for replay after the reset. */ + cancel_port_requests(execlists); + __unwind_incomplete_requests(engine); +} + +static void execlists_reset(struct intel_engine_cs *engine, bool stalled) +{ + unsigned long flags; + + GEM_TRACE("%s\n", engine->name); + + spin_lock_irqsave(&engine->active.lock, flags); + + __execlists_reset(engine, stalled); + + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +static void nop_submission_tasklet(unsigned long data) +{ + /* The driver is wedged; don't process any more events. */ +} + +static void execlists_cancel_requests(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request *rq, *rn; + struct rb_node *rb; + unsigned long flags; + + GEM_TRACE("%s\n", engine->name); + + /* + * Before we call engine->cancel_requests(), we should have exclusive + * access to the submission state. This is arranged for us by the + * caller disabling the interrupt generation, the tasklet and other + * threads that may then access the same state, giving us a free hand + * to reset state. However, we still need to let lockdep be aware that + * we know this state may be accessed in hardirq context, so we + * disable the irq around this manipulation and we want to keep + * the spinlock focused on its duties and not accidentally conflate + * coverage to the submission's irq state. (Similarly, although we + * shouldn't need to disable irq around the manipulation of the + * submission's irq state, we also wish to remind ourselves that + * it is irq state.) + */ + spin_lock_irqsave(&engine->active.lock, flags); + + __execlists_reset(engine, true); + + /* Mark all executing requests as skipped. */ + list_for_each_entry(rq, &engine->active.requests, sched.link) + mark_eio(rq); + + /* Flush the queued requests to the timeline list (for retiring). 
*/ + while ((rb = rb_first_cached(&execlists->queue))) { + struct i915_priolist *p = to_priolist(rb); + int i; + + priolist_for_each_request_consume(rq, rn, p, i) { + mark_eio(rq); + __i915_request_submit(rq); + } + + rb_erase_cached(&p->node, &execlists->queue); + i915_priolist_free(p); + } + + /* Cancel all attached virtual engines */ + while ((rb = rb_first_cached(&execlists->virtual))) { + struct virtual_engine *ve = + rb_entry(rb, typeof(*ve), nodes[engine->id].rb); + + rb_erase_cached(rb, &execlists->virtual); + RB_CLEAR_NODE(rb); + + spin_lock(&ve->base.active.lock); + rq = fetch_and_zero(&ve->request); + if (rq) { + mark_eio(rq); + + rq->engine = engine; + __i915_request_submit(rq); + i915_request_put(rq); + + ve->base.execlists.queue_priority_hint = INT_MIN; + } + spin_unlock(&ve->base.active.lock); + } + + /* Remaining _unready_ requests will be nop'ed when submitted */ + + execlists->queue_priority_hint = INT_MIN; + execlists->queue = RB_ROOT_CACHED; + + GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet)); + execlists->tasklet.func = nop_submission_tasklet; + + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +static void execlists_reset_finish(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + /* + * After a GPU reset, we may have requests to replay. Do so now while + * we still have the forcewake to be sure that the GPU is not allowed + * to sleep before we restart and reload a context. + */ + GEM_BUG_ON(!reset_in_progress(execlists)); + if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) + execlists->tasklet.func(execlists->tasklet.data); + + if (__tasklet_enable(&execlists->tasklet)) + /* And kick in case we missed a new request submission. */ + tasklet_hi_schedule(&execlists->tasklet); + GEM_TRACE("%s: depth->%d\n", engine->name, + atomic_read(&execlists->tasklet.count)); +} + +static int gen8_emit_bb_start(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* + * WaDisableCtxRestoreArbitration:bdw,chv + * + * We don't need to perform MI_ARB_ENABLE as often as we do (in + * particular all the gen that do not need the w/a at all!), if we + * took care to make sure that on every switch into this context + * (both ordinary and for preemption) that arbitrartion was enabled + * we would be fine. However, for gen8 there is another w/a that + * requires us to not preempt inside GPGPU execution, so we keep + * arbitration disabled for gen8 batches. Arbitration will be + * re-enabled before we close the request + * (engine->emit_fini_breadcrumb). + */ + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + + /* FIXME(BDW+): Address space and security selectors. */ + *cs++ = MI_BATCH_BUFFER_START_GEN8 | + (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)); + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + + intel_ring_advance(rq, cs); + + return 0; +} + +static int gen9_emit_bb_start(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + + *cs++ = MI_BATCH_BUFFER_START_GEN8 | + (flags & I915_DISPATCH_SECURE ? 
0 : BIT(8)); + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + + return 0; +} + +static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine) +{ + ENGINE_WRITE(engine, RING_IMR, + ~(engine->irq_enable_mask | engine->irq_keep_mask)); + ENGINE_POSTING_READ(engine, RING_IMR); +} + +static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) +{ + ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); +} + +static int gen8_emit_flush(struct i915_request *request, u32 mode) +{ + u32 cmd, *cs; + + cs = intel_ring_begin(request, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cmd = MI_FLUSH_DW + 1; + + /* We always require a command barrier so that subsequent + * commands, such as breadcrumb interrupts, are strictly ordered + * wrt the contents of the write cache being flushed to memory + * (and thus being coherent from the CPU). + */ + cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + + if (mode & EMIT_INVALIDATE) { + cmd |= MI_INVALIDATE_TLB; + if (request->engine->class == VIDEO_DECODE_CLASS) + cmd |= MI_INVALIDATE_BSD; + } + + *cs++ = cmd; + *cs++ = LRC_PPHWSP_SCRATCH_ADDR; + *cs++ = 0; /* upper addr */ + *cs++ = 0; /* value */ + intel_ring_advance(request, cs); + + return 0; +} + +static int gen8_emit_flush_render(struct i915_request *request, + u32 mode) +{ + bool vf_flush_wa = false, dc_flush_wa = false; + u32 *cs, flags = 0; + int len; + + flags |= PIPE_CONTROL_CS_STALL; + + if (mode & EMIT_FLUSH) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; + flags |= PIPE_CONTROL_FLUSH_ENABLE; + } + + if (mode & EMIT_INVALIDATE) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + + /* + * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL + * pipe control. 
+ */ + if (IS_GEN(request->i915, 9)) + vf_flush_wa = true; + + /* WaForGAMHang:kbl */ + if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0)) + dc_flush_wa = true; + } + + len = 6; + + if (vf_flush_wa) + len += 6; + + if (dc_flush_wa) + len += 12; + + cs = intel_ring_begin(request, len); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + if (vf_flush_wa) + cs = gen8_emit_pipe_control(cs, 0, 0); + + if (dc_flush_wa) + cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE, + 0); + + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); + + if (dc_flush_wa) + cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0); + + intel_ring_advance(request, cs); + + return 0; +} + +static int gen11_emit_flush_render(struct i915_request *request, + u32 mode) +{ + if (mode & EMIT_FLUSH) { + u32 *cs; + u32 flags = 0; + + flags |= PIPE_CONTROL_CS_STALL; + + flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; + flags |= PIPE_CONTROL_FLUSH_ENABLE; + flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + + cs = intel_ring_begin(request, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); + intel_ring_advance(request, cs); + } + + if (mode & EMIT_INVALIDATE) { + u32 *cs; + u32 flags = 0; + + flags |= PIPE_CONTROL_CS_STALL; + + flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + + cs = intel_ring_begin(request, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); + intel_ring_advance(request, cs); + } + + return 0; +} + +static u32 preparser_disable(bool state) +{ + return MI_ARB_CHECK | 1 << 8 | state; +} + +static int gen12_emit_flush_render(struct i915_request *request, + u32 mode) +{ + if (mode & EMIT_FLUSH) { + u32 flags = 0; + u32 *cs; + + flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + /* Wa_1409600907:tgl */ + flags |= PIPE_CONTROL_DEPTH_STALL; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; + flags |= PIPE_CONTROL_FLUSH_ENABLE; + flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH; + + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + flags |= PIPE_CONTROL_QW_WRITE; + + flags |= PIPE_CONTROL_CS_STALL; + + cs = intel_ring_begin(request, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); + intel_ring_advance(request, cs); + } + + if (mode & EMIT_INVALIDATE) { + u32 flags = 0; + u32 *cs; + + flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_L3_RO_CACHE_INVALIDATE; + + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + flags |= PIPE_CONTROL_QW_WRITE; + + flags |= PIPE_CONTROL_CS_STALL; + + cs = intel_ring_begin(request, 8); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* + 
* Prevent the pre-parser from skipping past the TLB + * invalidate and loading a stale page for the batch + * buffer / request payload. + */ + *cs++ = preparser_disable(true); + + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); + + *cs++ = preparser_disable(false); + intel_ring_advance(request, cs); + + /* + * Wa_1604544889:tgl + */ + if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) { + flags = 0; + flags |= PIPE_CONTROL_CS_STALL; + flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH; + + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + flags |= PIPE_CONTROL_QW_WRITE; + + cs = intel_ring_begin(request, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cs = gen8_emit_pipe_control(cs, flags, + LRC_PPHWSP_SCRATCH_ADDR); + intel_ring_advance(request, cs); + } + } + + return 0; +} + +/* + * Reserve space for 2 NOOPs at the end of each request to be + * used as a workaround for not being allowed to do lite + * restore with HEAD==TAIL (WaIdleLiteRestore). + */ +static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs) +{ + /* Ensure there's always at least one preemption point per-request. */ + *cs++ = MI_ARB_CHECK; + *cs++ = MI_NOOP; + request->wa_tail = intel_ring_offset(request, cs); + + return cs; +} + +static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs) +{ + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = intel_hws_preempt_address(request->engine); + *cs++ = 0; + + return cs; +} + +static __always_inline u32* +gen8_emit_fini_breadcrumb_footer(struct i915_request *request, + u32 *cs) +{ + *cs++ = MI_USER_INTERRUPT; + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + if (intel_engine_has_semaphores(request->engine)) + cs = emit_preempt_busywait(request, cs); + + request->tail = intel_ring_offset(request, cs); + assert_ring_tail_valid(request->ring, request->tail); + + return gen8_emit_wa_tail(request, cs); +} + +static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) +{ + cs = gen8_emit_ggtt_write(cs, + request->fence.seqno, + i915_request_active_timeline(request)->hwsp_offset, + 0); + + return gen8_emit_fini_breadcrumb_footer(request, cs); +} + +static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) +{ + cs = gen8_emit_pipe_control(cs, + PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_DC_FLUSH_ENABLE, + 0); + + /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ + cs = gen8_emit_ggtt_write_rcs(cs, + request->fence.seqno, + i915_request_active_timeline(request)->hwsp_offset, + PIPE_CONTROL_FLUSH_ENABLE | + PIPE_CONTROL_CS_STALL); + + return gen8_emit_fini_breadcrumb_footer(request, cs); +} + +static u32 * +gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) +{ + cs = gen8_emit_ggtt_write_rcs(cs, + request->fence.seqno, + i915_request_active_timeline(request)->hwsp_offset, + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_TILE_CACHE_FLUSH | + PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_DC_FLUSH_ENABLE | + PIPE_CONTROL_FLUSH_ENABLE); + + return gen8_emit_fini_breadcrumb_footer(request, cs); +} + +/* + * Note that the CS instruction pre-parser will not stall on the breadcrumb + * flush and will continue pre-fetching the instructions after it before the + * memory sync is completed. 
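[Sketch] gen8_emit_wa_tail() above and the last->tail = last->wa_tail rewrite in execlists_dequeue() cooperate: each request records one tail just after its breadcrumb and a second one after the MI_ARB_CHECK/MI_NOOP pad, so a lite-restore resubmission can always present RING_TAIL != RING_HEAD (WaIdleLiteRestore). A compact model of that bookkeeping is below; the 4-dword breadcrumb size is an assumption for illustration only.

#include <stdint.h>
#include <stdio.h>

struct sketch_request {
	uint32_t tail;		/* end of the breadcrumb */
	uint32_t wa_tail;	/* end of the WaIdleLiteRestore padding */
};

/* Emit the fini breadcrumb (modelled here as 4 dwords) followed by the
 * MI_ARB_CHECK + MI_NOOP pad, recording both offsets like the driver. */
static uint32_t emit_request_tail(struct sketch_request *rq, uint32_t cs)
{
	cs += 4;		/* breadcrumb dwords (assumed size) */
	rq->tail = cs * sizeof(uint32_t);

	cs += 2;		/* MI_ARB_CHECK + MI_NOOP */
	rq->wa_tail = cs * sizeof(uint32_t);

	return cs;
}

int main(void)
{
	struct sketch_request rq;
	uint32_t cs = 0;

	cs = emit_request_tail(&rq, cs);

	/* Normal submission stops at rq.tail; a resubmission after
	 * preempt-to-busy advances to rq.wa_tail so RING_TAIL still moves
	 * even though no new commands were added. */
	printf("tail=%u wa_tail=%u (cs=%u dwords)\n", rq.tail, rq.wa_tail, cs);
	return 0;
}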
On pre-gen12 HW, the pre-parser will stop at + * BB_START/END instructions, so, even though we might pre-fetch the pre-amble + * of the next request before the memory has been flushed, we're guaranteed that + * we won't access the batch itself too early. + * However, on gen12+ the parser can pre-fetch across the BB_START/END commands, + * so, if the current request is modifying an instruction in the next request on + * the same intel_context, we might pre-fetch and then execute the pre-update + * instruction. To avoid this, the users of self-modifying code should either + * disable the parser around the code emitting the memory writes, via a new flag + * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For + * the in-kernel use-cases we've opted to use a separate context, see + * reloc_gpu() as an example. + * All the above applies only to the instructions themselves. Non-inline data + * used by the instructions is not pre-fetched. + */ + +static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs) +{ + *cs++ = MI_SEMAPHORE_WAIT_TOKEN | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = intel_hws_preempt_address(request->engine); + *cs++ = 0; + *cs++ = 0; + *cs++ = MI_NOOP; + + return cs; +} + +static __always_inline u32* +gen12_emit_fini_breadcrumb_footer(struct i915_request *request, u32 *cs) +{ + *cs++ = MI_USER_INTERRUPT; + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + if (intel_engine_has_semaphores(request->engine)) + cs = gen12_emit_preempt_busywait(request, cs); + + request->tail = intel_ring_offset(request, cs); + assert_ring_tail_valid(request->ring, request->tail); + + return gen8_emit_wa_tail(request, cs); +} + +static u32 *gen12_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) +{ + cs = gen8_emit_ggtt_write(cs, + request->fence.seqno, + i915_request_active_timeline(request)->hwsp_offset, + 0); + + return gen12_emit_fini_breadcrumb_footer(request, cs); +} + +static u32 * +gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) +{ + cs = gen8_emit_ggtt_write_rcs(cs, + request->fence.seqno, + i915_request_active_timeline(request)->hwsp_offset, + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_TILE_CACHE_FLUSH | + PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + /* Wa_1409600907:tgl */ + PIPE_CONTROL_DEPTH_STALL | + PIPE_CONTROL_DC_FLUSH_ENABLE | + PIPE_CONTROL_FLUSH_ENABLE | + PIPE_CONTROL_HDC_PIPELINE_FLUSH); + + return gen12_emit_fini_breadcrumb_footer(request, cs); +} + +static void execlists_park(struct intel_engine_cs *engine) +{ + cancel_timer(&engine->execlists.timer); + cancel_timer(&engine->execlists.preempt); +} + +void intel_execlists_set_default_submission(struct intel_engine_cs *engine) +{ + engine->submit_request = execlists_submit_request; + engine->cancel_requests = execlists_cancel_requests; + engine->schedule = i915_schedule; + engine->execlists.tasklet.func = execlists_submission_tasklet; + + engine->reset.prepare = execlists_reset_prepare; + engine->reset.reset = execlists_reset; + engine->reset.finish = execlists_reset_finish; + + engine->park = execlists_park; + engine->unpark = NULL; + + engine->flags |= I915_ENGINE_SUPPORTS_STATS; + if (!intel_vgpu_active(engine->i915)) { + engine->flags |= I915_ENGINE_HAS_SEMAPHORES; + if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) + engine->flags |= I915_ENGINE_HAS_PREEMPTION; + } + + if (INTEL_GEN(engine->i915) >= 12) + engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO; +} + +static void 
execlists_destroy(struct intel_engine_cs *engine) +{ + intel_engine_cleanup_common(engine); + lrc_destroy_wa_ctx(engine); + kfree(engine); +} + +static void +logical_ring_default_vfuncs(struct intel_engine_cs *engine) +{ + /* Default vfuncs which can be overriden by each engine. */ + + engine->destroy = execlists_destroy; + engine->resume = execlists_resume; + + engine->reset.prepare = execlists_reset_prepare; + engine->reset.reset = execlists_reset; + engine->reset.finish = execlists_reset_finish; + + engine->cops = &execlists_context_ops; + engine->request_alloc = execlists_request_alloc; + + engine->emit_flush = gen8_emit_flush; + engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; + engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb; + if (INTEL_GEN(engine->i915) >= 12) + engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb; + + engine->set_default_submission = intel_execlists_set_default_submission; + + if (INTEL_GEN(engine->i915) < 11) { + engine->irq_enable = gen8_logical_ring_enable_irq; + engine->irq_disable = gen8_logical_ring_disable_irq; + } else { + /* + * TODO: On Gen11 interrupt masks need to be clear + * to allow C6 entry. Keep interrupts enabled at + * and take the hit of generating extra interrupts + * until a more refined solution exists. + */ + } + if (IS_GEN(engine->i915, 8)) + engine->emit_bb_start = gen8_emit_bb_start; + else + engine->emit_bb_start = gen9_emit_bb_start; +} + +static inline void +logical_ring_default_irqs(struct intel_engine_cs *engine) +{ + unsigned int shift = 0; + + if (INTEL_GEN(engine->i915) < 11) { + const u8 irq_shifts[] = { + [RCS0] = GEN8_RCS_IRQ_SHIFT, + [BCS0] = GEN8_BCS_IRQ_SHIFT, + [VCS0] = GEN8_VCS0_IRQ_SHIFT, + [VCS1] = GEN8_VCS1_IRQ_SHIFT, + [VECS0] = GEN8_VECS_IRQ_SHIFT, + }; + + shift = irq_shifts[engine->id]; + } + + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; + engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; +} + +static void rcs_submission_override(struct intel_engine_cs *engine) +{ + switch (INTEL_GEN(engine->i915)) { + case 12: + engine->emit_flush = gen12_emit_flush_render; + engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs; + break; + case 11: + engine->emit_flush = gen11_emit_flush_render; + engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs; + break; + default: + engine->emit_flush = gen8_emit_flush_render; + engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; + break; + } +} + +int intel_execlists_submission_setup(struct intel_engine_cs *engine) +{ + tasklet_init(&engine->execlists.tasklet, + execlists_submission_tasklet, (unsigned long)engine); + timer_setup(&engine->execlists.timer, execlists_timeslice, 0); + timer_setup(&engine->execlists.preempt, execlists_preempt, 0); + + logical_ring_default_vfuncs(engine); + logical_ring_default_irqs(engine); + + if (engine->class == RENDER_CLASS) + rcs_submission_override(engine); + + return 0; +} + +int intel_execlists_submission_init(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct drm_i915_private *i915 = engine->i915; + struct intel_uncore *uncore = engine->uncore; + u32 base = engine->mmio_base; + int ret; + + ret = intel_engine_init_common(engine); + if (ret) + return ret; + + if (intel_init_workaround_bb(engine)) + /* + * We continue even if we fail to initialize WA batch + * because we only expect rare glitches but nothing + * critical to prevent us from using GPU + */ + DRM_ERROR("WA batch buffer initialization failed\n"); + + 
if (HAS_LOGICAL_RING_ELSQ(i915)) { + execlists->submit_reg = uncore->regs + + i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base)); + execlists->ctrl_reg = uncore->regs + + i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base)); + } else { + execlists->submit_reg = uncore->regs + + i915_mmio_reg_offset(RING_ELSP(base)); + } + + execlists->csb_status = + &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; + + execlists->csb_write = + &engine->status_page.addr[intel_hws_csb_write_index(i915)]; + + if (INTEL_GEN(i915) < 11) + execlists->csb_size = GEN8_CSB_ENTRIES; + else + execlists->csb_size = GEN11_CSB_ENTRIES; + + reset_csb_pointers(engine); + + return 0; +} + +static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine) +{ + u32 indirect_ctx_offset; + + switch (INTEL_GEN(engine->i915)) { + default: + MISSING_CASE(INTEL_GEN(engine->i915)); + /* fall through */ + case 12: + indirect_ctx_offset = + GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + break; + case 11: + indirect_ctx_offset = + GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + break; + case 10: + indirect_ctx_offset = + GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + break; + case 9: + indirect_ctx_offset = + GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + break; + case 8: + indirect_ctx_offset = + GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + break; + } + + return indirect_ctx_offset; +} + + +static void init_common_reg_state(u32 * const regs, + const struct intel_engine_cs *engine, + const struct intel_ring *ring) +{ + regs[CTX_CONTEXT_CONTROL] = + _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) | + _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); + if (INTEL_GEN(engine->i915) < 11) + regs[CTX_CONTEXT_CONTROL] |= + _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | + CTX_CTRL_RS_CTX_ENABLE); + + regs[CTX_RING_BUFFER_CONTROL] = RING_CTL_SIZE(ring->size) | RING_VALID; + regs[CTX_BB_STATE] = RING_BB_PPGTT; +} + +static void init_wa_bb_reg_state(u32 * const regs, + const struct intel_engine_cs *engine, + u32 pos_bb_per_ctx) +{ + const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx; + + if (wa_ctx->per_ctx.size) { + const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); + + regs[pos_bb_per_ctx] = + (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; + } + + if (wa_ctx->indirect_ctx.size) { + const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); + + regs[pos_bb_per_ctx + 2] = + (ggtt_offset + wa_ctx->indirect_ctx.offset) | + (wa_ctx->indirect_ctx.size / CACHELINE_BYTES); + + regs[pos_bb_per_ctx + 4] = + intel_lr_indirect_ctx_offset(engine) << 6; + } +} + +static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt) +{ + if (i915_vm_is_4lvl(&ppgtt->vm)) { + /* 64b PPGTT (48bit canonical) + * PDP0_DESCRIPTOR contains the base address to PML4 and + * other PDP Descriptors are ignored. + */ + ASSIGN_CTX_PML4(ppgtt, regs); + } else { + ASSIGN_CTX_PDP(ppgtt, regs, 3); + ASSIGN_CTX_PDP(ppgtt, regs, 2); + ASSIGN_CTX_PDP(ppgtt, regs, 1); + ASSIGN_CTX_PDP(ppgtt, regs, 0); + } +} + +static struct i915_ppgtt *vm_alias(struct i915_address_space *vm) +{ + if (i915_is_ggtt(vm)) + return i915_vm_to_ggtt(vm)->alias; + else + return i915_vm_to_ppgtt(vm); +} + +static void execlists_init_reg_state(u32 *regs, + const struct intel_context *ce, + const struct intel_engine_cs *engine, + const struct intel_ring *ring, + bool close) +{ + /* + * A context is actually a big batch buffer with several + * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. 
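[Sketch] CTX_CONTEXT_CONTROL above is programmed with masked writes: the upper 16 bits of the value select which of the lower 16 bits the write actually affects, which is what lets _MASKED_BIT_ENABLE() and _MASKED_BIT_DISABLE() be combined in a single store. The sketch below mirrors that convention as used throughout the driver and models how such a register absorbs a write; the names are local to the example.

#include <stdint.h>
#include <stdio.h>

/* Masked-write helpers following the i915 convention: bits [31:16] act
 * as a write-enable mask for bits [15:0]. */
#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)	((a) << 16)

/* Model of how a masked register absorbs a write: only bits whose mask
 * bit is set change; everything else is preserved. */
static uint16_t apply_masked_write(uint16_t reg, uint32_t val)
{
	uint16_t mask = val >> 16;

	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t ctx_ctrl = 0x0000;

	ctx_ctrl = apply_masked_write(ctx_ctrl,
				      MASKED_BIT_ENABLE(1u << 3) |
				      MASKED_BIT_DISABLE(1u << 0));
	printf("CTX_CONTEXT_CONTROL = 0x%04x\n", (unsigned)ctx_ctrl);	/* bit 3 set, bit 0 clear */
	return 0;
}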
The + * values we are setting here are only for the first context restore: + * on a subsequent save, the GPU will recreate this batchbuffer with new + * values (including all the missing MI_LOAD_REGISTER_IMM commands that + * we are not initializing here). + * + * Must keep consistent with virtual_update_register_offsets(). + */ + u32 *bbe = set_offsets(regs, reg_offsets(engine), engine); + + if (close) { /* Close the batch; used mainly by live_lrc_layout() */ + *bbe = MI_BATCH_BUFFER_END; + if (INTEL_GEN(engine->i915) >= 10) + *bbe |= BIT(0); + } + + init_common_reg_state(regs, engine, ring); + init_ppgtt_reg_state(regs, vm_alias(ce->vm)); + + init_wa_bb_reg_state(regs, engine, + INTEL_GEN(engine->i915) >= 12 ? + GEN12_CTX_BB_PER_CTX_PTR : + CTX_BB_PER_CTX_PTR); +} + +static int +populate_lr_context(struct intel_context *ce, + struct drm_i915_gem_object *ctx_obj, + struct intel_engine_cs *engine, + struct intel_ring *ring) +{ + bool inhibit = true; + void *vaddr; + u32 *regs; + int ret; + + vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret); + return ret; + } + + set_redzone(vaddr, engine); + + if (engine->default_state) { + void *defaults; + + defaults = i915_gem_object_pin_map(engine->default_state, + I915_MAP_WB); + if (IS_ERR(defaults)) { + ret = PTR_ERR(defaults); + goto err_unpin_ctx; + } + + memcpy(vaddr, defaults, engine->context_size); + i915_gem_object_unpin_map(engine->default_state); + inhibit = false; + } + + /* The second page of the context object contains some fields which must + * be set up prior to the first execution. */ + regs = vaddr + LRC_STATE_PN * PAGE_SIZE; + execlists_init_reg_state(regs, ce, engine, ring, inhibit); + if (inhibit) + regs[CTX_CONTEXT_CONTROL] |= + _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); + + ret = 0; +err_unpin_ctx: + __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size); + i915_gem_object_unpin_map(ctx_obj); + return ret; +} + +static int __execlists_context_alloc(struct intel_context *ce, + struct intel_engine_cs *engine) +{ + struct drm_i915_gem_object *ctx_obj; + struct intel_ring *ring; + struct i915_vma *vma; + u32 context_size; + int ret; + + GEM_BUG_ON(ce->state); + context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); + + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + context_size += I915_GTT_PAGE_SIZE; /* for redzone */ + + ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size); + if (IS_ERR(ctx_obj)) + return PTR_ERR(ctx_obj); + + vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto error_deref_obj; + } + + if (!ce->timeline) { + struct intel_timeline *tl; + + tl = intel_timeline_create(engine->gt, NULL); + if (IS_ERR(tl)) { + ret = PTR_ERR(tl); + goto error_deref_obj; + } + + ce->timeline = tl; + } + + ring = intel_engine_create_ring(engine, (unsigned long)ce->ring); + if (IS_ERR(ring)) { + ret = PTR_ERR(ring); + goto error_deref_obj; + } + + ret = populate_lr_context(ce, ctx_obj, engine, ring); + if (ret) { + DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); + goto error_ring_free; + } + + ce->ring = ring; + ce->state = vma; + + return 0; + +error_ring_free: + intel_ring_put(ring); +error_deref_obj: + i915_gem_object_put(ctx_obj); + return ret; +} + +static struct list_head *virtual_queue(struct virtual_engine *ve) +{ + return &ve->base.execlists.default_priolist.requests[0]; +} + +static void 
virtual_context_destroy(struct kref *kref) +{ + struct virtual_engine *ve = + container_of(kref, typeof(*ve), context.ref); + unsigned int n; + + GEM_BUG_ON(!list_empty(virtual_queue(ve))); + GEM_BUG_ON(ve->request); + GEM_BUG_ON(ve->context.inflight); + + for (n = 0; n < ve->num_siblings; n++) { + struct intel_engine_cs *sibling = ve->siblings[n]; + struct rb_node *node = &ve->nodes[sibling->id].rb; + + if (RB_EMPTY_NODE(node)) + continue; + + spin_lock_irq(&sibling->active.lock); + + /* Detachment is lazily performed in the execlists tasklet */ + if (!RB_EMPTY_NODE(node)) + rb_erase_cached(node, &sibling->execlists.virtual); + + spin_unlock_irq(&sibling->active.lock); + } + GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet)); + + if (ve->context.state) + __execlists_context_fini(&ve->context); + intel_context_fini(&ve->context); + + kfree(ve->bonds); + kfree(ve); +} + +static void virtual_engine_initial_hint(struct virtual_engine *ve) +{ + int swp; + + /* + * Pick a random sibling on starting to help spread the load around. + * + * New contexts are typically created with exactly the same order + * of siblings, and often started in batches. Due to the way we iterate + * the array of sibling when submitting requests, sibling[0] is + * prioritised for dequeuing. If we make sure that sibling[0] is fairly + * randomised across the system, we also help spread the load by the + * first engine we inspect being different each time. + * + * NB This does not force us to execute on this engine, it will just + * typically be the first we inspect for submission. + */ + swp = prandom_u32_max(ve->num_siblings); + if (!swp) + return; + + swap(ve->siblings[swp], ve->siblings[0]); + if (!intel_engine_has_relative_mmio(ve->siblings[0])) + virtual_update_register_offsets(ve->context.lrc_reg_state, + ve->siblings[0]); +} + +static int virtual_context_pin(struct intel_context *ce) +{ + struct virtual_engine *ve = container_of(ce, typeof(*ve), context); + int err; + + /* Note: we must use a real engine class for setting up reg state */ + err = __execlists_context_pin(ce, ve->siblings[0]); + if (err) + return err; + + virtual_engine_initial_hint(ve); + return 0; +} + +static void virtual_context_enter(struct intel_context *ce) +{ + struct virtual_engine *ve = container_of(ce, typeof(*ve), context); + unsigned int n; + + for (n = 0; n < ve->num_siblings; n++) + intel_engine_pm_get(ve->siblings[n]); + + intel_timeline_enter(ce->timeline); +} + +static void virtual_context_exit(struct intel_context *ce) +{ + struct virtual_engine *ve = container_of(ce, typeof(*ve), context); + unsigned int n; + + intel_timeline_exit(ce->timeline); + + for (n = 0; n < ve->num_siblings; n++) + intel_engine_pm_put(ve->siblings[n]); +} + +static const struct intel_context_ops virtual_context_ops = { + .pin = virtual_context_pin, + .unpin = execlists_context_unpin, + + .enter = virtual_context_enter, + .exit = virtual_context_exit, + + .destroy = virtual_context_destroy, +}; + +static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve) +{ + struct i915_request *rq; + intel_engine_mask_t mask; + + rq = READ_ONCE(ve->request); + if (!rq) + return 0; + + /* The rq is ready for submission; rq->execution_mask is now stable. 
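+	 * An empty mask means no sibling is allowed to execute the request;
+	 * the "Invalid selection" branch below then skips it with -ENODEV
+	 * and falls back to siblings[0].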
*/ + mask = rq->execution_mask; + if (unlikely(!mask)) { + /* Invalid selection, submit to a random engine in error */ + i915_request_skip(rq, -ENODEV); + mask = ve->siblings[0]->mask; + } + + GEM_TRACE("%s: rq=%llx:%lld, mask=%x, prio=%d\n", + ve->base.name, + rq->fence.context, rq->fence.seqno, + mask, ve->base.execlists.queue_priority_hint); + + return mask; +} + +static void virtual_submission_tasklet(unsigned long data) +{ + struct virtual_engine * const ve = (struct virtual_engine *)data; + const int prio = ve->base.execlists.queue_priority_hint; + intel_engine_mask_t mask; + unsigned int n; + + rcu_read_lock(); + mask = virtual_submission_mask(ve); + rcu_read_unlock(); + if (unlikely(!mask)) + return; + + local_irq_disable(); + for (n = 0; READ_ONCE(ve->request) && n < ve->num_siblings; n++) { + struct intel_engine_cs *sibling = ve->siblings[n]; + struct ve_node * const node = &ve->nodes[sibling->id]; + struct rb_node **parent, *rb; + bool first; + + if (unlikely(!(mask & sibling->mask))) { + if (!RB_EMPTY_NODE(&node->rb)) { + spin_lock(&sibling->active.lock); + rb_erase_cached(&node->rb, + &sibling->execlists.virtual); + RB_CLEAR_NODE(&node->rb); + spin_unlock(&sibling->active.lock); + } + continue; + } + + spin_lock(&sibling->active.lock); + + if (!RB_EMPTY_NODE(&node->rb)) { + /* + * Cheat and avoid rebalancing the tree if we can + * reuse this node in situ. + */ + first = rb_first_cached(&sibling->execlists.virtual) == + &node->rb; + if (prio == node->prio || (prio > node->prio && first)) + goto submit_engine; + + rb_erase_cached(&node->rb, &sibling->execlists.virtual); + } + + rb = NULL; + first = true; + parent = &sibling->execlists.virtual.rb_root.rb_node; + while (*parent) { + struct ve_node *other; + + rb = *parent; + other = rb_entry(rb, typeof(*other), rb); + if (prio > other->prio) { + parent = &rb->rb_left; + } else { + parent = &rb->rb_right; + first = false; + } + } + + rb_link_node(&node->rb, rb, parent); + rb_insert_color_cached(&node->rb, + &sibling->execlists.virtual, + first); + +submit_engine: + GEM_BUG_ON(RB_EMPTY_NODE(&node->rb)); + node->prio = prio; + if (first && prio > sibling->execlists.queue_priority_hint) { + sibling->execlists.queue_priority_hint = prio; + tasklet_hi_schedule(&sibling->execlists.tasklet); + } + + spin_unlock(&sibling->active.lock); + } + local_irq_enable(); +} + +static void virtual_submit_request(struct i915_request *rq) +{ + struct virtual_engine *ve = to_virtual_engine(rq->engine); + struct i915_request *old; + unsigned long flags; + + GEM_TRACE("%s: rq=%llx:%lld\n", + ve->base.name, + rq->fence.context, + rq->fence.seqno); + + GEM_BUG_ON(ve->base.submit_request != virtual_submit_request); + + spin_lock_irqsave(&ve->base.active.lock, flags); + + old = ve->request; + if (old) { /* background completion event from preempt-to-busy */ + GEM_BUG_ON(!i915_request_completed(old)); + __i915_request_submit(old); + i915_request_put(old); + } + + if (i915_request_completed(rq)) { + __i915_request_submit(rq); + + ve->base.execlists.queue_priority_hint = INT_MIN; + ve->request = NULL; + } else { + ve->base.execlists.queue_priority_hint = rq_prio(rq); + ve->request = i915_request_get(rq); + + GEM_BUG_ON(!list_empty(virtual_queue(ve))); + list_move_tail(&rq->sched.link, virtual_queue(ve)); + + tasklet_schedule(&ve->base.execlists.tasklet); + } + + spin_unlock_irqrestore(&ve->base.active.lock, flags); +} + +static struct ve_bond * +virtual_find_bond(struct virtual_engine *ve, + const struct intel_engine_cs *master) +{ + int i; + + for (i = 0; i < 
ve->num_bonds; i++) { + if (ve->bonds[i].master == master) + return &ve->bonds[i]; + } + + return NULL; +} + +static void +virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal) +{ + struct virtual_engine *ve = to_virtual_engine(rq->engine); + intel_engine_mask_t allowed, exec; + struct ve_bond *bond; + + allowed = ~to_request(signal)->engine->mask; + + bond = virtual_find_bond(ve, to_request(signal)->engine); + if (bond) + allowed &= bond->sibling_mask; + + /* Restrict the bonded request to run on only the available engines */ + exec = READ_ONCE(rq->execution_mask); + while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed)) + ; + + /* Prevent the master from being re-run on the bonded engines */ + to_request(signal)->execution_mask &= ~allowed; +} + +struct intel_context * +intel_execlists_create_virtual(struct i915_gem_context *ctx, + struct intel_engine_cs **siblings, + unsigned int count) +{ + struct virtual_engine *ve; + unsigned int n; + int err; + + if (count == 0) + return ERR_PTR(-EINVAL); + + if (count == 1) + return intel_context_create(ctx, siblings[0]); + + ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL); + if (!ve) + return ERR_PTR(-ENOMEM); + + ve->base.i915 = ctx->i915; + ve->base.gt = siblings[0]->gt; + ve->base.uncore = siblings[0]->uncore; + ve->base.id = -1; + ve->base.class = OTHER_CLASS; + ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; + ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; + + /* + * The decision on whether to submit a request using semaphores + * depends on the saturated state of the engine. We only compute + * this during HW submission of the request, and we need for this + * state to be globally applied to all requests being submitted + * to this engine. Virtual engines encompass more than one physical + * engine and so we cannot accurately tell in advance if one of those + * engines is already saturated and so cannot afford to use a semaphore + * and be pessimized in priority for doing so -- if we are the only + * context using semaphores after all other clients have stopped, we + * will be starved on the saturated system. Such a global switch for + * semaphores is less than ideal, but alas is the current compromise. + */ + ve->base.saturated = ALL_ENGINES; + + snprintf(ve->base.name, sizeof(ve->base.name), "virtual"); + + intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); + intel_engine_init_breadcrumbs(&ve->base); + + intel_engine_init_execlists(&ve->base); + + ve->base.cops = &virtual_context_ops; + ve->base.request_alloc = execlists_request_alloc; + + ve->base.schedule = i915_schedule; + ve->base.submit_request = virtual_submit_request; + ve->base.bond_execute = virtual_bond_execute; + + INIT_LIST_HEAD(virtual_queue(ve)); + ve->base.execlists.queue_priority_hint = INT_MIN; + tasklet_init(&ve->base.execlists.tasklet, + virtual_submission_tasklet, + (unsigned long)ve); + + intel_context_init(&ve->context, ctx, &ve->base); + + for (n = 0; n < count; n++) { + struct intel_engine_cs *sibling = siblings[n]; + + GEM_BUG_ON(!is_power_of_2(sibling->mask)); + if (sibling->mask & ve->base.mask) { + DRM_DEBUG("duplicate %s entry in load balancer\n", + sibling->name); + err = -EINVAL; + goto err_put; + } + + /* + * The virtual engine implementation is tightly coupled to + * the execlists backend -- we push out request directly + * into a tree inside each physical engine. We could support + * layering if we handle cloning of the requests and + * submitting a copy into each backend. 
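+		 * Hence the check below: a sibling whose submission tasklet
+		 * is not execlists_submission_tasklet (e.g. an engine driven
+		 * by the GuC backend) is rejected with -ENODEV.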
+ */ + if (sibling->execlists.tasklet.func != + execlists_submission_tasklet) { + err = -ENODEV; + goto err_put; + } + + GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb)); + RB_CLEAR_NODE(&ve->nodes[sibling->id].rb); + + ve->siblings[ve->num_siblings++] = sibling; + ve->base.mask |= sibling->mask; + + /* + * All physical engines must be compatible for their emission + * functions (as we build the instructions during request + * construction and do not alter them before submission + * on the physical engine). We use the engine class as a guide + * here, although that could be refined. + */ + if (ve->base.class != OTHER_CLASS) { + if (ve->base.class != sibling->class) { + DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n", + sibling->class, ve->base.class); + err = -EINVAL; + goto err_put; + } + continue; + } + + ve->base.class = sibling->class; + ve->base.uabi_class = sibling->uabi_class; + snprintf(ve->base.name, sizeof(ve->base.name), + "v%dx%d", ve->base.class, count); + ve->base.context_size = sibling->context_size; + + ve->base.emit_bb_start = sibling->emit_bb_start; + ve->base.emit_flush = sibling->emit_flush; + ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb; + ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb; + ve->base.emit_fini_breadcrumb_dw = + sibling->emit_fini_breadcrumb_dw; + + ve->base.flags = sibling->flags; + } + + ve->base.flags |= I915_ENGINE_IS_VIRTUAL; + + err = __execlists_context_alloc(&ve->context, siblings[0]); + if (err) + goto err_put; + + __set_bit(CONTEXT_ALLOC_BIT, &ve->context.flags); + + return &ve->context; + +err_put: + intel_context_put(&ve->context); + return ERR_PTR(err); +} + +struct intel_context * +intel_execlists_clone_virtual(struct i915_gem_context *ctx, + struct intel_engine_cs *src) +{ + struct virtual_engine *se = to_virtual_engine(src); + struct intel_context *dst; + + dst = intel_execlists_create_virtual(ctx, + se->siblings, + se->num_siblings); + if (IS_ERR(dst)) + return dst; + + if (se->num_bonds) { + struct virtual_engine *de = to_virtual_engine(dst->engine); + + de->bonds = kmemdup(se->bonds, + sizeof(*se->bonds) * se->num_bonds, + GFP_KERNEL); + if (!de->bonds) { + intel_context_put(dst); + return ERR_PTR(-ENOMEM); + } + + de->num_bonds = se->num_bonds; + } + + return dst; +} + +int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, + const struct intel_engine_cs *master, + const struct intel_engine_cs *sibling) +{ + struct virtual_engine *ve = to_virtual_engine(engine); + struct ve_bond *bond; + int n; + + /* Sanity check the sibling is part of the virtual engine */ + for (n = 0; n < ve->num_siblings; n++) + if (sibling == ve->siblings[n]) + break; + if (n == ve->num_siblings) + return -EINVAL; + + bond = virtual_find_bond(ve, master); + if (bond) { + bond->sibling_mask |= sibling->mask; + return 0; + } + + bond = krealloc(ve->bonds, + sizeof(*bond) * (ve->num_bonds + 1), + GFP_KERNEL); + if (!bond) + return -ENOMEM; + + bond[ve->num_bonds].master = master; + bond[ve->num_bonds].sibling_mask = sibling->mask; + + ve->bonds = bond; + ve->num_bonds++; + + return 0; +} + +struct intel_engine_cs * +intel_virtual_engine_get_sibling(struct intel_engine_cs *engine, + unsigned int sibling) +{ + struct virtual_engine *ve = to_virtual_engine(engine); + + if (sibling >= ve->num_siblings) + return NULL; + + return ve->siblings[sibling]; +} + +void intel_execlists_show_requests(struct intel_engine_cs *engine, + struct drm_printer *m, + void (*show_request)(struct drm_printer *m, + struct 
i915_request *rq, + const char *prefix), + unsigned int max) +{ + const struct intel_engine_execlists *execlists = &engine->execlists; + struct i915_request *rq, *last; + unsigned long flags; + unsigned int count; + struct rb_node *rb; + + spin_lock_irqsave(&engine->active.lock, flags); + + last = NULL; + count = 0; + list_for_each_entry(rq, &engine->active.requests, sched.link) { + if (count++ < max - 1) + show_request(m, rq, "\t\tE "); + else + last = rq; + } + if (last) { + if (count > max) { + drm_printf(m, + "\t\t...skipping %d executing requests...\n", + count - max); + } + show_request(m, last, "\t\tE "); + } + + last = NULL; + count = 0; + if (execlists->queue_priority_hint != INT_MIN) + drm_printf(m, "\t\tQueue priority hint: %d\n", + execlists->queue_priority_hint); + for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { + struct i915_priolist *p = rb_entry(rb, typeof(*p), node); + int i; + + priolist_for_each_request(rq, p, i) { + if (count++ < max - 1) + show_request(m, rq, "\t\tQ "); + else + last = rq; + } + } + if (last) { + if (count > max) { + drm_printf(m, + "\t\t...skipping %d queued requests...\n", + count - max); + } + show_request(m, last, "\t\tQ "); + } + + last = NULL; + count = 0; + for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) { + struct virtual_engine *ve = + rb_entry(rb, typeof(*ve), nodes[engine->id].rb); + struct i915_request *rq = READ_ONCE(ve->request); + + if (rq) { + if (count++ < max - 1) + show_request(m, rq, "\t\tV "); + else + last = rq; + } + } + if (last) { + if (count > max) { + drm_printf(m, + "\t\t...skipping %d virtual requests...\n", + count - max); + } + show_request(m, last, "\t\tV "); + } + + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +void intel_lr_context_reset(struct intel_engine_cs *engine, + struct intel_context *ce, + u32 head, + bool scrub) +{ + GEM_BUG_ON(!intel_context_is_pinned(ce)); + + /* + * We want a simple context + ring to execute the breadcrumb update. + * We cannot rely on the context being intact across the GPU hang, + * so clear it and rebuild just what we need for the breadcrumb. + * All pending requests for this context will be zapped, and any + * future request will be after userspace has had the opportunity + * to recreate its own state. + */ + if (scrub) + restore_default_state(ce, engine); + + /* Rerun the request; its payload has been neutered (if guilty). 
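+	 * Rewinding ring->head below therefore replays only the remaining
+	 * (neutered) contents of the ring, which amounts to the breadcrumb
+	 * update described above.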
*/ + ce->ring->head = head; + intel_ring_update_space(ce->ring); + + __execlists_update_reg_state(ce, engine); +} + +bool +intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine) +{ + return engine->set_default_submission == + intel_execlists_set_default_submission; +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_lrc.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h new file mode 100644 index 000000000000..04511d8ebdc1 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_lrc.h @@ -0,0 +1,133 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _INTEL_LRC_H_ +#define _INTEL_LRC_H_ + +#include <linux/types.h> + +struct drm_printer; + +struct drm_i915_private; +struct i915_gem_context; +struct i915_request; +struct intel_context; +struct intel_engine_cs; + +/* Execlists regs */ +#define RING_ELSP(base) _MMIO((base) + 0x230) +#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234) +#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4) +#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244) +#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) +#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) +#define CTX_CTRL_RS_CTX_ENABLE (1 << 1) +#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2) +#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE (1 << 8) +#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0) +#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510) +#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550) + +#define EL_CTRL_LOAD (1 << 0) + +/* The docs specify that the write pointer wraps around after 5h, "After status + * is written out to the last available status QW at offset 5h, this pointer + * wraps to 0." + * + * Therefore, one must infer than even though there are 3 bits available, 6 and + * 7 appear to be * reserved. 
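+ *
+ * Decoding RING_CONTEXT_STATUS_PTR with the masks below therefore gives
+ * write pointer = ptr & GEN8_CSB_PTR_MASK (bits 2:0) and
+ * read pointer = (ptr >> 8) & GEN8_CSB_PTR_MASK (bits 10:8); Gen11 widens
+ * the mask to 0xf for its 12 CSB entries.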
+ */ +#define GEN8_CSB_ENTRIES 6 +#define GEN8_CSB_PTR_MASK 0x7 +#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8) +#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0) + +#define GEN11_CSB_ENTRIES 12 +#define GEN11_CSB_PTR_MASK 0xf +#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8) +#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0) + +#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ +#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */ +#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */ +/* in Gen12 ID 0x7FF is reserved to indicate idle */ +#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1) + +enum { + INTEL_CONTEXT_SCHEDULE_IN = 0, + INTEL_CONTEXT_SCHEDULE_OUT, + INTEL_CONTEXT_SCHEDULE_PREEMPTED, +}; + +/* Logical Rings */ +void intel_logical_ring_cleanup(struct intel_engine_cs *engine); + +int intel_execlists_submission_setup(struct intel_engine_cs *engine); +int intel_execlists_submission_init(struct intel_engine_cs *engine); + +/* Logical Ring Contexts */ +/* At the start of the context image is its per-process HWS page */ +#define LRC_PPHWSP_PN (0) +#define LRC_PPHWSP_SZ (1) +/* After the PPHWSP we have the logical state for the context */ +#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ) + +/* Space within PPHWSP reserved to be used as scratch */ +#define LRC_PPHWSP_SCRATCH 0x34 +#define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32)) + +void intel_execlists_set_default_submission(struct intel_engine_cs *engine); + +void intel_lr_context_reset(struct intel_engine_cs *engine, + struct intel_context *ce, + u32 head, + bool scrub); + +void intel_execlists_show_requests(struct intel_engine_cs *engine, + struct drm_printer *m, + void (*show_request)(struct drm_printer *m, + struct i915_request *rq, + const char *prefix), + unsigned int max); + +struct intel_context * +intel_execlists_create_virtual(struct i915_gem_context *ctx, + struct intel_engine_cs **siblings, + unsigned int count); + +struct intel_context * +intel_execlists_clone_virtual(struct i915_gem_context *ctx, + struct intel_engine_cs *src); + +int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, + const struct intel_engine_cs *master, + const struct intel_engine_cs *sibling); + +struct intel_engine_cs * +intel_virtual_engine_get_sibling(struct intel_engine_cs *engine, + unsigned int sibling); + +bool +intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine); + +#endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h new file mode 100644 index 000000000000..06ab0276e10e --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#ifndef _INTEL_LRC_REG_H_ +#define _INTEL_LRC_REG_H_ + +#include <linux/types.h> + +/* GEN8 to GEN11 Reg State Context */ +#define CTX_CONTEXT_CONTROL (0x02 + 1) +#define CTX_RING_HEAD (0x04 + 1) +#define CTX_RING_TAIL (0x06 + 1) +#define CTX_RING_BUFFER_START (0x08 + 1) +#define CTX_RING_BUFFER_CONTROL (0x0a + 1) +#define CTX_BB_STATE (0x10 + 1) +#define CTX_BB_PER_CTX_PTR (0x18 + 1) +#define CTX_PDP3_UDW (0x24 + 1) +#define CTX_PDP3_LDW (0x26 + 1) +#define CTX_PDP2_UDW (0x28 + 1) +#define CTX_PDP2_LDW (0x2a + 1) +#define CTX_PDP1_UDW (0x2c + 1) +#define CTX_PDP1_LDW (0x2e + 1) +#define CTX_PDP0_UDW (0x30 + 1) +#define CTX_PDP0_LDW (0x32 + 1) +#define CTX_R_PWR_CLK_STATE (0x42 + 1) + +#define GEN9_CTX_RING_MI_MODE 0x54 + +/* 
GEN12+ Reg State Context */ +#define GEN12_CTX_BB_PER_CTX_PTR (0x12 + 1) + +#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \ + u32 *reg_state__ = (reg_state); \ + const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \ + (reg_state__)[CTX_PDP ## n ## _UDW] = upper_32_bits(addr__); \ + (reg_state__)[CTX_PDP ## n ## _LDW] = lower_32_bits(addr__); \ +} while (0) + +#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \ + u32 *reg_state__ = (reg_state); \ + const u64 addr__ = px_dma(ppgtt->pd); \ + (reg_state__)[CTX_PDP0_UDW] = upper_32_bits(addr__); \ + (reg_state__)[CTX_PDP0_LDW] = lower_32_bits(addr__); \ +} while (0) + +#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 +#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 +#define GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x19 +#define GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x1A +#define GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0xD + +#endif /* _INTEL_LRC_REG_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c new file mode 100644 index 000000000000..2b977991b785 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -0,0 +1,491 @@ +/* + * Copyright (c) 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "i915_drv.h" + +#include "intel_engine.h" +#include "intel_gt.h" +#include "intel_mocs.h" +#include "intel_lrc.h" +#include "intel_ring.h" + +/* structures required */ +struct drm_i915_mocs_entry { + u32 control_value; + u16 l3cc_value; + u16 used; +}; + +struct drm_i915_mocs_table { + unsigned int size; + unsigned int n_entries; + const struct drm_i915_mocs_entry *table; +}; + +/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */ +#define _LE_CACHEABILITY(value) ((value) << 0) +#define _LE_TGT_CACHE(value) ((value) << 2) +#define LE_LRUM(value) ((value) << 4) +#define LE_AOM(value) ((value) << 6) +#define LE_RSC(value) ((value) << 7) +#define LE_SCC(value) ((value) << 8) +#define LE_PFM(value) ((value) << 11) +#define LE_SCF(value) ((value) << 14) +#define LE_COS(value) ((value) << 15) +#define LE_SSE(value) ((value) << 17) + +/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */ +#define L3_ESC(value) ((value) << 0) +#define L3_SCC(value) ((value) << 1) +#define _L3_CACHEABILITY(value) ((value) << 4) + +/* Helper defines */ +#define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. 
*/ +#define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */ + +/* (e)LLC caching options */ +/* + * Note: LE_0_PAGETABLE works only up to Gen11; for newer gens it means + * the same as LE_UC + */ +#define LE_0_PAGETABLE _LE_CACHEABILITY(0) +#define LE_1_UC _LE_CACHEABILITY(1) +#define LE_2_WT _LE_CACHEABILITY(2) +#define LE_3_WB _LE_CACHEABILITY(3) + +/* Target cache */ +#define LE_TC_0_PAGETABLE _LE_TGT_CACHE(0) +#define LE_TC_1_LLC _LE_TGT_CACHE(1) +#define LE_TC_2_LLC_ELLC _LE_TGT_CACHE(2) +#define LE_TC_3_LLC_ELLC_ALT _LE_TGT_CACHE(3) + +/* L3 caching options */ +#define L3_0_DIRECT _L3_CACHEABILITY(0) +#define L3_1_UC _L3_CACHEABILITY(1) +#define L3_2_RESERVED _L3_CACHEABILITY(2) +#define L3_3_WB _L3_CACHEABILITY(3) + +#define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \ + [__idx] = { \ + .control_value = __control_value, \ + .l3cc_value = __l3cc_value, \ + .used = 1, \ + } + +/* + * MOCS tables + * + * These are the MOCS tables that are programmed across all the rings. + * The control value is programmed to all the rings that support the + * MOCS registers. While the l3cc_values are only programmed to the + * LNCFCMOCS0 - LNCFCMOCS32 registers. + * + * These tables are intended to be kept reasonably consistent across + * HW platforms, and for ICL+, be identical across OSes. To achieve + * that, for Icelake and above, list of entries is published as part + * of bspec. + * + * Entries not part of the following tables are undefined as far as + * userspace is concerned and shouldn't be relied upon. For Gen < 12 + * they will be initialized to PTE. Gen >= 12 onwards don't have a setting for + * PTE and will be initialized to an invalid value. + * + * The last two entries are reserved by the hardware. For ICL+ they + * should be initialized according to bspec and never used, for older + * platforms they should never be written to. + * + * NOTE: These tables are part of bspec and defined as part of hardware + * interface for ICL+. For older platforms, they are part of kernel + * ABI. It is expected that, for specific hardware platform, existing + * entries will remain constant and the table will only be updated by + * adding new entries, filling unused positions. + */ +#define GEN9_MOCS_ENTRIES \ + MOCS_ENTRY(I915_MOCS_UNCACHED, \ + LE_1_UC | LE_TC_2_LLC_ELLC, \ + L3_1_UC), \ + MOCS_ENTRY(I915_MOCS_PTE, \ + LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \ + L3_3_WB) + +static const struct drm_i915_mocs_entry skylake_mocs_table[] = { + GEN9_MOCS_ENTRIES, + MOCS_ENTRY(I915_MOCS_CACHED, + LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3), + L3_3_WB) +}; + +/* NOTE: the LE_TGT_CACHE is not used on Broxton */ +static const struct drm_i915_mocs_entry broxton_mocs_table[] = { + GEN9_MOCS_ENTRIES, + MOCS_ENTRY(I915_MOCS_CACHED, + LE_1_UC | LE_TC_2_LLC_ELLC | LE_LRUM(3), + L3_3_WB) +}; + +#define GEN11_MOCS_ENTRIES \ + /* Entries 0 and 1 are defined per-platform */ \ + /* Base - L3 + LLC */ \ + MOCS_ENTRY(2, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \ + L3_3_WB), \ + /* Base - Uncached */ \ + MOCS_ENTRY(3, \ + LE_1_UC | LE_TC_1_LLC, \ + L3_1_UC), \ + /* Base - L3 */ \ + MOCS_ENTRY(4, \ + LE_1_UC | LE_TC_1_LLC, \ + L3_3_WB), \ + /* Base - LLC */ \ + MOCS_ENTRY(5, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \ + L3_1_UC), \ + /* Age 0 - LLC */ \ + MOCS_ENTRY(6, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \ + L3_1_UC), \ + /* Age 0 - L3 + LLC */ \ + MOCS_ENTRY(7, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \ + L3_3_WB), \ + /* Age: Don't Chg. 
- LLC */ \ + MOCS_ENTRY(8, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \ + L3_1_UC), \ + /* Age: Don't Chg. - L3 + LLC */ \ + MOCS_ENTRY(9, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \ + L3_3_WB), \ + /* No AOM - LLC */ \ + MOCS_ENTRY(10, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \ + L3_1_UC), \ + /* No AOM - L3 + LLC */ \ + MOCS_ENTRY(11, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \ + L3_3_WB), \ + /* No AOM; Age 0 - LLC */ \ + MOCS_ENTRY(12, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \ + L3_1_UC), \ + /* No AOM; Age 0 - L3 + LLC */ \ + MOCS_ENTRY(13, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \ + L3_3_WB), \ + /* No AOM; Age:DC - LLC */ \ + MOCS_ENTRY(14, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \ + L3_1_UC), \ + /* No AOM; Age:DC - L3 + LLC */ \ + MOCS_ENTRY(15, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \ + L3_3_WB), \ + /* Self-Snoop - L3 + LLC */ \ + MOCS_ENTRY(18, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \ + L3_3_WB), \ + /* Skip Caching - L3 + LLC(12.5%) */ \ + MOCS_ENTRY(19, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(7), \ + L3_3_WB), \ + /* Skip Caching - L3 + LLC(25%) */ \ + MOCS_ENTRY(20, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(3), \ + L3_3_WB), \ + /* Skip Caching - L3 + LLC(50%) */ \ + MOCS_ENTRY(21, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(1), \ + L3_3_WB), \ + /* Skip Caching - L3 + LLC(75%) */ \ + MOCS_ENTRY(22, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(3), \ + L3_3_WB), \ + /* Skip Caching - L3 + LLC(87.5%) */ \ + MOCS_ENTRY(23, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(7), \ + L3_3_WB), \ + /* HW Reserved - SW program but never use */ \ + MOCS_ENTRY(62, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \ + L3_1_UC), \ + /* HW Reserved - SW program but never use */ \ + MOCS_ENTRY(63, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \ + L3_1_UC) + +static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = { + /* Base - Error (Reserved for Non-Use) */ + MOCS_ENTRY(0, 0x0, 0x0), + /* Base - Reserved */ + MOCS_ENTRY(1, 0x0, 0x0), + + GEN11_MOCS_ENTRIES, + + /* Implicitly enable L1 - HDC:L1 + L3 + LLC */ + MOCS_ENTRY(48, + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_3_WB), + /* Implicitly enable L1 - HDC:L1 + L3 */ + MOCS_ENTRY(49, + LE_1_UC | LE_TC_1_LLC, + L3_3_WB), + /* Implicitly enable L1 - HDC:L1 + LLC */ + MOCS_ENTRY(50, + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_1_UC), + /* Implicitly enable L1 - HDC:L1 */ + MOCS_ENTRY(51, + LE_1_UC | LE_TC_1_LLC, + L3_1_UC), + /* HW Special Case (CCS) */ + MOCS_ENTRY(60, + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_1_UC), + /* HW Special Case (Displayable) */ + MOCS_ENTRY(61, + LE_1_UC | LE_TC_1_LLC, + L3_3_WB), +}; + +static const struct drm_i915_mocs_entry icelake_mocs_table[] = { + /* Base - Uncached (Deprecated) */ + MOCS_ENTRY(I915_MOCS_UNCACHED, + LE_1_UC | LE_TC_1_LLC, + L3_1_UC), + /* Base - L3 + LeCC:PAT (Deprecated) */ + MOCS_ENTRY(I915_MOCS_PTE, + LE_0_PAGETABLE | LE_TC_1_LLC, + L3_3_WB), + + GEN11_MOCS_ENTRIES +}; + +static bool get_mocs_settings(const struct drm_i915_private *i915, + struct drm_i915_mocs_table *table) +{ + bool result = false; + + if (INTEL_GEN(i915) >= 12) { + table->size = ARRAY_SIZE(tigerlake_mocs_table); + table->table = tigerlake_mocs_table; + table->n_entries = GEN11_NUM_MOCS_ENTRIES; + result = true; + } else if (IS_GEN(i915, 11)) { + table->size = ARRAY_SIZE(icelake_mocs_table); + table->table = icelake_mocs_table; + table->n_entries = GEN11_NUM_MOCS_ENTRIES; + result = true; + } else if 
(IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) { + table->size = ARRAY_SIZE(skylake_mocs_table); + table->n_entries = GEN9_NUM_MOCS_ENTRIES; + table->table = skylake_mocs_table; + result = true; + } else if (IS_GEN9_LP(i915)) { + table->size = ARRAY_SIZE(broxton_mocs_table); + table->n_entries = GEN9_NUM_MOCS_ENTRIES; + table->table = broxton_mocs_table; + result = true; + } else { + WARN_ONCE(INTEL_GEN(i915) >= 9, + "Platform that should have a MOCS table does not.\n"); + } + + /* WaDisableSkipCaching:skl,bxt,kbl,glk */ + if (IS_GEN(i915, 9)) { + int i; + + for (i = 0; i < table->size; i++) + if (WARN_ON(table->table[i].l3cc_value & + (L3_ESC(1) | L3_SCC(0x7)))) + return false; + } + + return result; +} + +static i915_reg_t mocs_register(const struct intel_engine_cs *engine, int index) +{ + switch (engine->id) { + case RCS0: + return GEN9_GFX_MOCS(index); + case VCS0: + return GEN9_MFX0_MOCS(index); + case BCS0: + return GEN9_BLT_MOCS(index); + case VECS0: + return GEN9_VEBOX_MOCS(index); + case VCS1: + return GEN9_MFX1_MOCS(index); + case VCS2: + return GEN11_MFX2_MOCS(index); + default: + MISSING_CASE(engine->id); + return INVALID_MMIO_REG; + } +} + +/* + * Get control_value from MOCS entry taking into account when it's not used: + * I915_MOCS_PTE's value is returned in this case. + */ +static u32 get_entry_control(const struct drm_i915_mocs_table *table, + unsigned int index) +{ + if (table->table[index].used) + return table->table[index].control_value; + + return table->table[I915_MOCS_PTE].control_value; +} + +static void init_mocs_table(struct intel_engine_cs *engine, + const struct drm_i915_mocs_table *table) +{ + struct intel_uncore *uncore = engine->uncore; + u32 unused_value = table->table[I915_MOCS_PTE].control_value; + unsigned int i; + + for (i = 0; i < table->size; i++) + intel_uncore_write_fw(uncore, + mocs_register(engine, i), + get_entry_control(table, i)); + + /* All remaining entries are unused */ + for (; i < table->n_entries; i++) + intel_uncore_write_fw(uncore, + mocs_register(engine, i), + unused_value); +} + +/* + * Get l3cc_value from MOCS entry taking into account when it's not used: + * I915_MOCS_PTE's value is returned in this case. 
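+ * Each GEN9_LNCFCMOCS register packs two of these 16-bit values; see
+ * l3cc_combine() below, which places the low entry in bits 15:0 and the
+ * high entry in bits 31:16.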
+ */ +static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table, + unsigned int index) +{ + if (table->table[index].used) + return table->table[index].l3cc_value; + + return table->table[I915_MOCS_PTE].l3cc_value; +} + +static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table, + u16 low, + u16 high) +{ + return low | (u32)high << 16; +} + +static void init_l3cc_table(struct intel_engine_cs *engine, + const struct drm_i915_mocs_table *table) +{ + struct intel_uncore *uncore = engine->uncore; + u16 unused_value = table->table[I915_MOCS_PTE].l3cc_value; + unsigned int i; + + for (i = 0; i < table->size / 2; i++) { + u16 low = get_entry_l3cc(table, 2 * i); + u16 high = get_entry_l3cc(table, 2 * i + 1); + + intel_uncore_write(uncore, + GEN9_LNCFCMOCS(i), + l3cc_combine(table, low, high)); + } + + /* Odd table size - 1 left over */ + if (table->size & 1) { + u16 low = get_entry_l3cc(table, 2 * i); + + intel_uncore_write(uncore, + GEN9_LNCFCMOCS(i), + l3cc_combine(table, low, unused_value)); + i++; + } + + /* All remaining entries are also unused */ + for (; i < table->n_entries / 2; i++) + intel_uncore_write(uncore, + GEN9_LNCFCMOCS(i), + l3cc_combine(table, unused_value, + unused_value)); +} + +void intel_mocs_init_engine(struct intel_engine_cs *engine) +{ + struct drm_i915_mocs_table table; + + /* Called under a blanket forcewake */ + assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); + + if (!get_mocs_settings(engine->i915, &table)) + return; + + /* Platforms with global MOCS do not need per-engine initialization. */ + if (!HAS_GLOBAL_MOCS_REGISTERS(engine->i915)) + init_mocs_table(engine, &table); + + if (engine->class == RENDER_CLASS) + init_l3cc_table(engine, &table); +} + +static void intel_mocs_init_global(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + struct drm_i915_mocs_table table; + unsigned int index; + + /* + * LLC and eDRAM control values are not applicable to dgfx + */ + if (IS_DGFX(gt->i915)) + return; + + GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915)); + + if (!get_mocs_settings(gt->i915, &table)) + return; + + if (GEM_DEBUG_WARN_ON(table.size > table.n_entries)) + return; + + for (index = 0; index < table.size; index++) + intel_uncore_write(uncore, + GEN12_GLOBAL_MOCS(index), + table.table[index].control_value); + + /* + * Ok, now set the unused entries to the invalid entry (index 0). These + * entries are officially undefined and no contract for the contents and + * settings is given for these entries. 
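+	 * For the Gen12 table above, entry 0 is the reserved "Error" entry,
+	 * so every unused index ends up programmed with that same control
+	 * value.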
+ */ + for (; index < table.n_entries; index++) + intel_uncore_write(uncore, + GEN12_GLOBAL_MOCS(index), + table.table[0].control_value); +} + +void intel_mocs_init(struct intel_gt *gt) +{ + if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915)) + intel_mocs_init_global(gt); +} diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h new file mode 100644 index 000000000000..83371f3e6ba1 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_mocs.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef INTEL_MOCS_H +#define INTEL_MOCS_H + +/** + * DOC: Memory Objects Control State (MOCS) + * + * Motivation: + * In previous Gens the MOCS settings was a value that was set by user land as + * part of the batch. In Gen9 this has changed to be a single table (per ring) + * that all batches now reference by index instead of programming the MOCS + * directly. + * + * The one wrinkle in this is that only PART of the MOCS tables are included + * in context (The GFX_MOCS_0 - GFX_MOCS_64 and the LNCFCMOCS0 - LNCFCMOCS32 + * registers). The rest are not (the settings for the other rings). + * + * This table needs to be set at system start-up because the way the table + * interacts with the contexts and the GmmLib interface. + * + * + * Implementation: + * + * The tables (one per supported platform) are defined in intel_mocs.c + * and are programmed in the first batch after the context is loaded + * (with the hardware workarounds). This will then let the usual + * context handling keep the MOCS in step. + */ + +struct intel_engine_cs; +struct intel_gt; + +void intel_mocs_init(struct intel_gt *gt); +void intel_mocs_init_engine(struct intel_engine_cs *engine); + +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c new file mode 100644 index 000000000000..700104b90163 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -0,0 +1,787 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include <linux/pm_runtime.h> + +#include "i915_drv.h" +#include "intel_gt.h" +#include "intel_gt_pm.h" +#include "intel_rc6.h" +#include "intel_sideband.h" + +/** + * DOC: RC6 + * + * RC6 is a special power stage which allows the GPU to enter an very + * low-voltage mode when idle, using down to 0V while at this stage. 
This + * stage is entered automatically when the GPU is idle when RC6 support is + * enabled, and as soon as new workload arises GPU wakes up automatically as + * well. + * + * There are different RC6 modes available in Intel GPU, which differentiate + * among each other with the latency required to enter and leave RC6 and + * voltage consumed by the GPU in different states. + * + * The combination of the following flags define which states GPU is allowed + * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and + * RC6pp is deepest RC6. Their support by hardware varies according to the + * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one + * which brings the most power savings; deeper states save more power, but + * require higher latency to switch to and wake up. + */ + +static struct intel_gt *rc6_to_gt(struct intel_rc6 *rc6) +{ + return container_of(rc6, struct intel_gt, rc6); +} + +static struct intel_uncore *rc6_to_uncore(struct intel_rc6 *rc) +{ + return rc6_to_gt(rc)->uncore; +} + +static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc) +{ + return rc6_to_gt(rc)->i915; +} + +static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) +{ + intel_uncore_write_fw(uncore, reg, val); +} + +static void gen11_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* 2b: Program RC6 thresholds.*/ + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); + set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150); + + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + for_each_engine(engine, rc6_to_gt(rc6), id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + + set(uncore, GUC_MAX_IDLE_COUNT, 0xA); + + set(uncore, GEN6_RC_SLEEP, 0); + + set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ + + /* + * 2c: Program Coarse Power Gating Policies. + * + * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we + * use instead is a more conservative estimate for the maximum time + * it takes us to service a CS interrupt and submit a new ELSP - that + * is the time which the GPU is idle waiting for the CPU to select the + * next request to execute. If the idle hysteresis is less than that + * interrupt service latency, the hardware will automatically gate + * the power well and we will then incur the wake up cost on top of + * the service latency. A similar guide from plane_state is that we + * do not want the enable hysteresis to less than the wakeup latency. + * + * igt/gem_exec_nop/sequential provides a rough estimate for the + * service latency, and puts it around 10us for Broadwell (and other + * big core) and around 40us for Broxton (and other low power cores). + * [Note that for legacy ringbuffer submission, this is less than 1us!] + * However, the wakeup latency on Broxton is closer to 100us. To be + * conservative, we have to factor in a context switch on top (due + * to ksoftirqd). 
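+	 * Hence the value of 250 programmed below: 250 * 1280ns is roughly
+	 * 320us, comfortably above the worst-case latencies noted above.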
+ */ + set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250); + set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250); + + /* 3a: Enable RC6 */ + set(uncore, GEN6_RC_CONTROL, + GEN6_RC_CTL_HW_ENABLE | + GEN6_RC_CTL_RC6_ENABLE | + GEN6_RC_CTL_EI_MODE(1)); + + set(uncore, GEN9_PG_ENABLE, + GEN9_RENDER_PG_ENABLE | + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE); +} + +static void gen9_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 rc6_mode; + + /* 2b: Program RC6 thresholds.*/ + if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) { + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); + set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150); + } else if (IS_SKYLAKE(rc6_to_i915(rc6))) { + /* + * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only + * when CPG is enabled + */ + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); + } else { + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); + } + + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + for_each_engine(engine, rc6_to_gt(rc6), id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + + set(uncore, GUC_MAX_IDLE_COUNT, 0xA); + + set(uncore, GEN6_RC_SLEEP, 0); + + /* + * 2c: Program Coarse Power Gating Policies. + * + * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we + * use instead is a more conservative estimate for the maximum time + * it takes us to service a CS interrupt and submit a new ELSP - that + * is the time which the GPU is idle waiting for the CPU to select the + * next request to execute. If the idle hysteresis is less than that + * interrupt service latency, the hardware will automatically gate + * the power well and we will then incur the wake up cost on top of + * the service latency. A similar guide from plane_state is that we + * do not want the enable hysteresis to less than the wakeup latency. + * + * igt/gem_exec_nop/sequential provides a rough estimate for the + * service latency, and puts it around 10us for Broadwell (and other + * big core) and around 40us for Broxton (and other low power cores). + * [Note that for legacy ringbuffer submission, this is less than 1us!] + * However, the wakeup latency on Broxton is closer to 100us. To be + * conservative, we have to factor in a context switch on top (due + * to ksoftirqd). + */ + set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250); + set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250); + + /* 3a: Enable RC6 */ + set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ + + /* WaRsUseTimeoutMode:cnl (pre-prod) */ + if (IS_CNL_REVID(rc6_to_i915(rc6), CNL_REVID_A0, CNL_REVID_C0)) + rc6_mode = GEN7_RC_CTL_TO_MODE; + else + rc6_mode = GEN6_RC_CTL_EI_MODE(1); + + set(uncore, GEN6_RC_CONTROL, + GEN6_RC_CTL_HW_ENABLE | + GEN6_RC_CTL_RC6_ENABLE | + rc6_mode); + + /* + * WaRsDisableCoarsePowerGating:skl,cnl + * - Render/Media PG need to be disabled with RC6. 
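+	 *   (i.e. GEN9_PG_ENABLE is left untouched below on platforms that
+	 *   need this workaround)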
+ */ + if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6))) + set(uncore, GEN9_PG_ENABLE, + GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE); +} + +static void gen8_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* 2b: Program RC6 thresholds.*/ + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + for_each_engine(engine, rc6_to_gt(rc6), id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + set(uncore, GEN6_RC_SLEEP, 0); + set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ + + /* 3: Enable RC6 */ + set(uncore, GEN6_RC_CONTROL, + GEN6_RC_CTL_HW_ENABLE | + GEN7_RC_CTL_TO_MODE | + GEN6_RC_CTL_RC6_ENABLE); +} + +static void gen6_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 rc6vids, rc6_mask; + int ret; + + set(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); + set(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30); + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); + + for_each_engine(engine, rc6_to_gt(rc6), id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + + set(uncore, GEN6_RC_SLEEP, 0); + set(uncore, GEN6_RC1e_THRESHOLD, 1000); + if (IS_IVYBRIDGE(i915)) + set(uncore, GEN6_RC6_THRESHOLD, 125000); + else + set(uncore, GEN6_RC6_THRESHOLD, 50000); + set(uncore, GEN6_RC6p_THRESHOLD, 150000); + set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */ + + /* We don't use those on Haswell */ + rc6_mask = GEN6_RC_CTL_RC6_ENABLE; + if (HAS_RC6p(i915)) + rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; + if (HAS_RC6pp(i915)) + rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; + set(uncore, GEN6_RC_CONTROL, + rc6_mask | + GEN6_RC_CTL_EI_MODE(1) | + GEN6_RC_CTL_HW_ENABLE); + + rc6vids = 0; + ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, + &rc6vids, NULL); + if (IS_GEN(i915, 6) && ret) { + DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); + } else if (IS_GEN(i915, 6) && + (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { + DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", + GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); + rc6vids &= 0xffff00; + rc6vids |= GEN6_ENCODE_RC6_VID(450); + ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); + if (ret) + DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); + } +} + +/* Check that the pcbr address is not empty. 
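+ * If the BIOS left VLV_PCBR unprogrammed, carve the power context out of
+ * the top of stolen memory (dsm.end + 1 - pctx_size) and point PCBR at it.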
*/ +static int chv_rc6_init(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + resource_size_t pctx_paddr, paddr; + resource_size_t pctx_size = 32 * SZ_1K; + u32 pcbr; + + pcbr = intel_uncore_read(uncore, VLV_PCBR); + if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { + DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); + paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size; + GEM_BUG_ON(paddr > U32_MAX); + + pctx_paddr = (paddr & ~4095); + intel_uncore_write(uncore, VLV_PCBR, pctx_paddr); + } + + return 0; +} + +static int vlv_rc6_init(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct drm_i915_gem_object *pctx; + resource_size_t pctx_paddr; + resource_size_t pctx_size = 24 * SZ_1K; + u32 pcbr; + + pcbr = intel_uncore_read(uncore, VLV_PCBR); + if (pcbr) { + /* BIOS set it up already, grab the pre-alloc'd space */ + resource_size_t pcbr_offset; + + pcbr_offset = (pcbr & ~4095) - i915->dsm.start; + pctx = i915_gem_object_create_stolen_for_preallocated(i915, + pcbr_offset, + I915_GTT_OFFSET_NONE, + pctx_size); + if (IS_ERR(pctx)) + return PTR_ERR(pctx); + + goto out; + } + + DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); + + /* + * From the Gunit register HAS: + * The Gfx driver is expected to program this register and ensure + * proper allocation within Gfx stolen memory. For example, this + * register should be programmed such than the PCBR range does not + * overlap with other ranges, such as the frame buffer, protected + * memory, or any other relevant ranges. + */ + pctx = i915_gem_object_create_stolen(i915, pctx_size); + if (IS_ERR(pctx)) { + DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); + return PTR_ERR(pctx); + } + + GEM_BUG_ON(range_overflows_t(u64, + i915->dsm.start, + pctx->stolen->start, + U32_MAX)); + pctx_paddr = i915->dsm.start + pctx->stolen->start; + intel_uncore_write(uncore, VLV_PCBR, pctx_paddr); + +out: + rc6->pctx = pctx; + return 0; +} + +static void chv_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* 2a: Program RC6 thresholds.*/ + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + + for_each_engine(engine, rc6_to_gt(rc6), id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + set(uncore, GEN6_RC_SLEEP, 0); + + /* TO threshold set to 500 us (0x186 * 1.28 us) */ + set(uncore, GEN6_RC6_THRESHOLD, 0x186); + + /* Allows RC6 residency counter to work */ + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | + VLV_MEDIA_RC6_COUNT_EN | + VLV_RENDER_RC6_COUNT_EN)); + + /* 3: Enable RC6 */ + set(uncore, GEN6_RC_CONTROL, GEN7_RC_CTL_TO_MODE); +} + +static void vlv_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000); + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); + + for_each_engine(engine, rc6_to_gt(rc6), id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + + set(uncore, GEN6_RC6_THRESHOLD, 0x557); + + /* Allows RC6 residency counter to work */ + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | + VLV_MEDIA_RC0_COUNT_EN | + VLV_RENDER_RC0_COUNT_EN | 
+ VLV_MEDIA_RC6_COUNT_EN | + VLV_RENDER_RC6_COUNT_EN)); + + set(uncore, GEN6_RC_CONTROL, + GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL); +} + +static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct drm_i915_private *i915 = rc6_to_i915(rc6); + u32 rc6_ctx_base, rc_ctl, rc_sw_target; + bool enable_rc6 = true; + + rc_ctl = intel_uncore_read(uncore, GEN6_RC_CONTROL); + rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE); + rc_sw_target &= RC_SW_TARGET_STATE_MASK; + rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT; + DRM_DEBUG_DRIVER("BIOS enabled RC states: " + "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n", + onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE), + onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE), + rc_sw_target); + + if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) { + DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n"); + enable_rc6 = false; + } + + /* + * The exact context size is not known for BXT, so assume a page size + * for this check. + */ + rc6_ctx_base = + intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK; + if (!(rc6_ctx_base >= i915->dsm_reserved.start && + rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) { + DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n"); + enable_rc6 = false; + } + + if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) { + DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n"); + enable_rc6 = false; + } + + if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) || + !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) || + !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) { + DRM_DEBUG_DRIVER("Pushbus not setup properly.\n"); + enable_rc6 = false; + } + + if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) { + DRM_DEBUG_DRIVER("GFX pause not setup properly.\n"); + enable_rc6 = false; + } + + if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) { + DRM_DEBUG_DRIVER("GPM control not setup properly.\n"); + enable_rc6 = false; + } + + return enable_rc6; +} + +static bool rc6_supported(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + + if (!HAS_RC6(i915)) + return false; + + if (intel_vgpu_active(i915)) + return false; + + if (is_mock_gt(rc6_to_gt(rc6))) + return false; + + if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) { + dev_notice(i915->drm.dev, + "RC6 and powersaving disabled by BIOS\n"); + return false; + } + + return true; +} + +static void rpm_get(struct intel_rc6 *rc6) +{ + GEM_BUG_ON(rc6->wakeref); + pm_runtime_get_sync(&rc6_to_i915(rc6)->drm.pdev->dev); + rc6->wakeref = true; +} + +static void rpm_put(struct intel_rc6 *rc6) +{ + GEM_BUG_ON(!rc6->wakeref); + pm_runtime_put(&rc6_to_i915(rc6)->drm.pdev->dev); + rc6->wakeref = false; +} + +static bool intel_rc6_ctx_corrupted(struct intel_rc6 *rc6) +{ + return !intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO); +} + +static void intel_rc6_ctx_wa_init(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + return; + + if (intel_rc6_ctx_corrupted(rc6)) { + DRM_INFO("RC6 context corrupted, disabling runtime power management\n"); + rc6->ctx_corrupted = true; + } +} + +/** + * intel_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA + * @rc6: 
rc6 state + * + * Perform any steps needed to re-init the RC6 CTX WA after system resume. + */ +void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6) +{ + if (rc6->ctx_corrupted && !intel_rc6_ctx_corrupted(rc6)) { + DRM_INFO("RC6 context restored, re-enabling runtime power management\n"); + rc6->ctx_corrupted = false; + } +} + +/** + * intel_rc6_ctx_wa_check - check for a new RC6 CTX corruption + * @rc6: rc6 state + * + * Check if an RC6 CTX corruption has happened since the last check and if so + * disable RC6 and runtime power management. +*/ +void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + return; + + if (rc6->ctx_corrupted) + return; + + if (!intel_rc6_ctx_corrupted(rc6)) + return; + + DRM_NOTE("RC6 context corruption, disabling runtime power management\n"); + + intel_rc6_disable(rc6); + rc6->ctx_corrupted = true; + + return; +} + +static void __intel_rc6_disable(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_uncore *uncore = rc6_to_uncore(rc6); + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + if (INTEL_GEN(i915) >= 9) + set(uncore, GEN9_PG_ENABLE, 0); + set(uncore, GEN6_RC_CONTROL, 0); + set(uncore, GEN6_RC_STATE, 0); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); +} + +void intel_rc6_init(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + int err; + + /* Disable runtime-pm until we can save the GPU state with rc6 pctx */ + rpm_get(rc6); + + if (!rc6_supported(rc6)) + return; + + intel_rc6_ctx_wa_init(rc6); + + if (IS_CHERRYVIEW(i915)) + err = chv_rc6_init(rc6); + else if (IS_VALLEYVIEW(i915)) + err = vlv_rc6_init(rc6); + else + err = 0; + + /* Sanitize rc6, ensure it is disabled before we are ready. */ + __intel_rc6_disable(rc6); + + rc6->supported = err == 0; +} + +void intel_rc6_sanitize(struct intel_rc6 *rc6) +{ + if (rc6->enabled) { /* unbalanced suspend/resume */ + rpm_get(rc6); + rc6->enabled = false; + } + + if (rc6->supported) + __intel_rc6_disable(rc6); +} + +void intel_rc6_enable(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_uncore *uncore = rc6_to_uncore(rc6); + + if (!rc6->supported) + return; + + GEM_BUG_ON(rc6->enabled); + + if (rc6->ctx_corrupted) + return; + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + if (IS_CHERRYVIEW(i915)) + chv_rc6_enable(rc6); + else if (IS_VALLEYVIEW(i915)) + vlv_rc6_enable(rc6); + else if (INTEL_GEN(i915) >= 11) + gen11_rc6_enable(rc6); + else if (INTEL_GEN(i915) >= 9) + gen9_rc6_enable(rc6); + else if (IS_BROADWELL(i915)) + gen8_rc6_enable(rc6); + else if (INTEL_GEN(i915) >= 6) + gen6_rc6_enable(rc6); + + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + + /* rc6 is ready, runtime-pm is go! */ + rpm_put(rc6); + rc6->enabled = true; +} + +void intel_rc6_disable(struct intel_rc6 *rc6) +{ + if (!rc6->enabled) + return; + + rpm_get(rc6); + rc6->enabled = false; + + __intel_rc6_disable(rc6); +} + +void intel_rc6_fini(struct intel_rc6 *rc6) +{ + struct drm_i915_gem_object *pctx; + + intel_rc6_disable(rc6); + + pctx = fetch_and_zero(&rc6->pctx); + if (pctx) + i915_gem_object_put(pctx); + + if (rc6->wakeref) + rpm_put(rc6); +} + +static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg) +{ + u32 lower, upper, tmp; + int loop = 2; + + /* + * The register accessed do not need forcewake. We borrow + * uncore lock to prevent concurrent access to range reg. 
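+ *
+ * For example, intel_rc6_residency_ns() below wraps this call as
+ * (simplified sketch of the actual caller):
+ *
+ *	spin_lock_irqsave(&uncore->lock, flags);
+ *	time_hw = vlv_residency_raw(uncore, reg);
+ *	spin_unlock_irqrestore(&uncore->lock, flags);
+ *
+ * and any other caller is expected to hold uncore->lock the same way.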
+ */ + lockdep_assert_held(&uncore->lock); + + /* + * vlv and chv residency counters are 40 bits in width. + * With a control bit, we can choose between upper or lower + * 32bit window into this counter. + * + * Although we always use the counter in high-range mode elsewhere, + * userspace may attempt to read the value before rc6 is initialised, + * before we have set the default VLV_COUNTER_CONTROL value. So always + * set the high bit to be safe. + */ + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); + upper = intel_uncore_read_fw(uncore, reg); + do { + tmp = upper; + + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH)); + lower = intel_uncore_read_fw(uncore, reg); + + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); + upper = intel_uncore_read_fw(uncore, reg); + } while (upper != tmp && --loop); + + /* + * Everywhere else we always use VLV_COUNTER_CONTROL with the + * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set + * now. + */ + + return lower | (u64)upper << 8; +} + +u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_uncore *uncore = rc6_to_uncore(rc6); + u64 time_hw, prev_hw, overflow_hw; + unsigned int fw_domains; + unsigned long flags; + unsigned int i; + u32 mul, div; + + if (!rc6->supported) + return 0; + + /* + * Store previous hw counter values for counter wrap-around handling. + * + * There are only four interesting registers and they live next to each + * other so we can use the relative address, compared to the smallest + * one as the index into driver storage. + */ + i = (i915_mmio_reg_offset(reg) - + i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32); + if (WARN_ON_ONCE(i >= ARRAY_SIZE(rc6->cur_residency))) + return 0; + + fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); + + spin_lock_irqsave(&uncore->lock, flags); + intel_uncore_forcewake_get__locked(uncore, fw_domains); + + /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + mul = 1000000; + div = i915->czclk_freq; + overflow_hw = BIT_ULL(40); + time_hw = vlv_residency_raw(uncore, reg); + } else { + /* 833.33ns units on Gen9LP, 1.28us elsewhere. */ + if (IS_GEN9_LP(i915)) { + mul = 10000; + div = 12; + } else { + mul = 1280; + div = 1; + } + + overflow_hw = BIT_ULL(32); + time_hw = intel_uncore_read_fw(uncore, reg); + } + + /* + * Counter wrap handling. + * + * But relying on a sufficient frequency of queries otherwise counters + * can still wrap. + */ + prev_hw = rc6->prev_hw_residency[i]; + rc6->prev_hw_residency[i] = time_hw; + + /* RC6 delta from last sample. */ + if (time_hw >= prev_hw) + time_hw -= prev_hw; + else + time_hw += overflow_hw - prev_hw; + + /* Add delta to RC6 extended raw driver copy. 
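+ *
+ * The extended copy stays in raw hardware units; the conversion to
+ * nanoseconds only happens on return, e.g. with the non-LP mul/div
+ * above one raw tick becomes 1 * 1280 / 1 = 1280 ns, while on gen9 LP
+ * it becomes 1 * 10000 / 12 ~= 833.33 ns.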
*/ + time_hw += rc6->cur_residency[i]; + rc6->cur_residency[i] = time_hw; + + intel_uncore_forcewake_put__locked(uncore, fw_domains); + spin_unlock_irqrestore(&uncore->lock, flags); + + return mul_u64_u32_div(time_hw, mul, div); +} + +u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg) +{ + return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000); +} diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h new file mode 100644 index 000000000000..1370f6834a4c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rc6.h @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RC6_H +#define INTEL_RC6_H + +#include "i915_reg.h" + +struct intel_engine_cs; +struct intel_rc6; + +void intel_rc6_init(struct intel_rc6 *rc6); +void intel_rc6_fini(struct intel_rc6 *rc6); + +void intel_rc6_sanitize(struct intel_rc6 *rc6); +void intel_rc6_enable(struct intel_rc6 *rc6); +void intel_rc6_disable(struct intel_rc6 *rc6); + +u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, i915_reg_t reg); +u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg); + +void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6); +void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6); + +#endif /* INTEL_RC6_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h new file mode 100644 index 000000000000..89ad5697a8d4 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RC6_TYPES_H +#define INTEL_RC6_TYPES_H + +#include <linux/spinlock.h> +#include <linux/types.h> + +#include "intel_engine_types.h" + +struct drm_i915_gem_object; + +struct intel_rc6 { + u64 prev_hw_residency[4]; + u64 cur_residency[4]; + + struct drm_i915_gem_object *pctx; + + bool supported : 1; + bool enabled : 1; + bool wakeref : 1; + bool ctx_corrupted : 1; +}; + +#endif /* INTEL_RC6_TYPES_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c new file mode 100644 index 000000000000..c4edc35e7d89 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -0,0 +1,237 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Mika Kuoppala <[email protected]> + * + */ + +#include "i915_drv.h" +#include "intel_renderstate.h" +#include "intel_ring.h" + +struct intel_renderstate { + const struct intel_renderstate_rodata *rodata; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u32 batch_offset; + u32 batch_size; + u32 aux_offset; + u32 aux_size; +}; + +static const struct intel_renderstate_rodata * +render_state_get_rodata(const struct intel_engine_cs *engine) +{ + if (engine->class != RENDER_CLASS) + return NULL; + + switch (INTEL_GEN(engine->i915)) { + case 6: + return &gen6_null_state; + case 7: + return &gen7_null_state; + case 8: + return &gen8_null_state; + case 9: + return &gen9_null_state; + } + + return NULL; +} + +/* + * Macro to add commands to auxiliary batch. + * This macro only checks for page overflow before inserting the commands, + * this is sufficient as the null state generator makes the final batch + * with two passes to build command and state separately. At this point + * the size of both are known and it compacts them by relocating the state + * right after the commands taking care of alignment so we should sufficient + * space below them for adding new commands. + */ +#define OUT_BATCH(batch, i, val) \ + do { \ + if ((i) >= PAGE_SIZE / sizeof(u32)) \ + goto err; \ + (batch)[(i)++] = (val); \ + } while(0) + +static int render_state_setup(struct intel_renderstate *so, + struct drm_i915_private *i915) +{ + const struct intel_renderstate_rodata *rodata = so->rodata; + unsigned int i = 0, reloc_index = 0; + unsigned int needs_clflush; + u32 *d; + int ret; + + ret = i915_gem_object_prepare_write(so->obj, &needs_clflush); + if (ret) + return ret; + + d = kmap_atomic(i915_gem_object_get_dirty_page(so->obj, 0)); + + while (i < rodata->batch_items) { + u32 s = rodata->batch[i]; + + if (i * 4 == rodata->reloc[reloc_index]) { + u64 r = s + so->vma->node.start; + s = lower_32_bits(r); + if (HAS_64BIT_RELOC(i915)) { + if (i + 1 >= rodata->batch_items || + rodata->batch[i + 1] != 0) + goto err; + + d[i++] = s; + s = upper_32_bits(r); + } + + reloc_index++; + } + + d[i++] = s; + } + + if (rodata->reloc[reloc_index] != -1) { + DRM_ERROR("only %d relocs resolved\n", reloc_index); + goto err; + } + + so->batch_offset = i915_ggtt_offset(so->vma); + so->batch_size = rodata->batch_items * sizeof(u32); + + while (i % CACHELINE_DWORDS) + OUT_BATCH(d, i, MI_NOOP); + + so->aux_offset = i * sizeof(u32); + + if (HAS_POOLED_EU(i915)) { + /* + * We always program 3x6 pool config but depending upon which + * subslice is disabled HW drops down to appropriate config + * shown below. + * + * In the below table 2x6 config always refers to + * fused-down version, native 2x6 is not available and can + * be ignored + * + * SNo subslices config eu pool configuration + * ----------------------------------------------------------- + * 1 3 subslices enabled (3x6) - 0x00777000 (9+9) + * 2 ss0 disabled (2x6) - 0x00777000 (3+9) + * 3 ss1 disabled (2x6) - 0x00770000 (6+6) + * 4 ss2 disabled (2x6) - 0x00007000 (9+3) + */ + u32 eu_pool_config = 0x00777000; + + OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE); + OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE); + OUT_BATCH(d, i, eu_pool_config); + OUT_BATCH(d, i, 0); + OUT_BATCH(d, i, 0); + OUT_BATCH(d, i, 0); + } + + OUT_BATCH(d, i, MI_BATCH_BUFFER_END); + so->aux_size = i * sizeof(u32) - so->aux_offset; + so->aux_offset += so->batch_offset; + /* + * Since we are sending length, we need to strictly conform to + * all requirements. For Gen2 this must be a multiple of 8. 
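+ * ALIGN() rounds up to the next multiple of the alignment, so e.g. an
+ * aux batch of 52 bytes is padded out to 56.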
+ */ + so->aux_size = ALIGN(so->aux_size, 8); + + if (needs_clflush) + drm_clflush_virt_range(d, i * sizeof(u32)); + kunmap_atomic(d); + + ret = 0; +out: + i915_gem_object_finish_access(so->obj); + return ret; + +err: + kunmap_atomic(d); + ret = -EINVAL; + goto out; +} + +#undef OUT_BATCH + +int intel_renderstate_emit(struct i915_request *rq) +{ + struct intel_engine_cs *engine = rq->engine; + struct intel_renderstate so = {}; /* keep the compiler happy */ + int err; + + so.rodata = render_state_get_rodata(engine); + if (!so.rodata) + return 0; + + if (so.rodata->batch_items * 4 > PAGE_SIZE) + return -EINVAL; + + so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); + if (IS_ERR(so.obj)) + return PTR_ERR(so.obj); + + so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL); + if (IS_ERR(so.vma)) { + err = PTR_ERR(so.vma); + goto err_obj; + } + + err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (err) + goto err_vma; + + err = render_state_setup(&so, rq->i915); + if (err) + goto err_unpin; + + err = engine->emit_bb_start(rq, + so.batch_offset, so.batch_size, + I915_DISPATCH_SECURE); + if (err) + goto err_unpin; + + if (so.aux_size > 8) { + err = engine->emit_bb_start(rq, + so.aux_offset, so.aux_size, + I915_DISPATCH_SECURE); + if (err) + goto err_unpin; + } + + i915_vma_lock(so.vma); + err = i915_request_await_object(rq, so.vma->obj, false); + if (err == 0) + err = i915_vma_move_to_active(so.vma, rq, 0); + i915_vma_unlock(so.vma); +err_unpin: + i915_vma_unpin(so.vma); +err_vma: + i915_vma_close(so.vma); +err_obj: + i915_gem_object_put(so.obj); + return err; +} diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h new file mode 100644 index 000000000000..8d5079145054 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h @@ -0,0 +1,51 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _INTEL_RENDERSTATE_H_ +#define _INTEL_RENDERSTATE_H_ + +#include <linux/types.h> + +struct i915_request; + +struct intel_renderstate_rodata { + const u32 *reloc; + const u32 *batch; + const u32 batch_items; +}; + +#define RO_RENDERSTATE(_g) \ + const struct intel_renderstate_rodata gen ## _g ## _null_state = { \ + .reloc = gen ## _g ## _null_state_relocs, \ + .batch = gen ## _g ## _null_state_batch, \ + .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \ + } + +extern const struct intel_renderstate_rodata gen6_null_state; +extern const struct intel_renderstate_rodata gen7_null_state; +extern const struct intel_renderstate_rodata gen8_null_state; +extern const struct intel_renderstate_rodata gen9_null_state; + +int intel_renderstate_emit(struct i915_request *rq); + +#endif /* _INTEL_RENDERSTATE_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c new file mode 100644 index 000000000000..f03e000051c1 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -0,0 +1,1355 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2008-2018 Intel Corporation + */ + +#include <linux/sched/mm.h> +#include <linux/stop_machine.h> + +#include "display/intel_display_types.h" +#include "display/intel_overlay.h" + +#include "gem/i915_gem_context.h" + +#include "i915_drv.h" +#include "i915_gpu_error.h" +#include "i915_irq.h" +#include "intel_engine_pm.h" +#include "intel_gt.h" +#include "intel_gt_pm.h" +#include "intel_reset.h" + +#include "uc/intel_guc.h" + +#define RESET_MAX_RETRIES 3 + +/* XXX How to handle concurrent GGTT updates using tiling registers? */ +#define RESET_UNDER_STOP_MACHINE 0 + +static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set) +{ + intel_uncore_rmw_fw(uncore, reg, 0, set); +} + +static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr) +{ + intel_uncore_rmw_fw(uncore, reg, clr, 0); +} + +static void engine_skip_context(struct i915_request *rq) +{ + struct intel_engine_cs *engine = rq->engine; + struct i915_gem_context *hung_ctx = rq->gem_context; + + if (!i915_request_is_active(rq)) + return; + + lockdep_assert_held(&engine->active.lock); + list_for_each_entry_continue(rq, &engine->active.requests, sched.link) + if (rq->gem_context == hung_ctx) + i915_request_skip(rq, -EIO); +} + +static void client_mark_guilty(struct drm_i915_file_private *file_priv, + const struct i915_gem_context *ctx) +{ + unsigned int score; + unsigned long prev_hang; + + if (i915_gem_context_is_banned(ctx)) + score = I915_CLIENT_SCORE_CONTEXT_BAN; + else + score = 0; + + prev_hang = xchg(&file_priv->hang_timestamp, jiffies); + if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES)) + score += I915_CLIENT_SCORE_HANG_FAST; + + if (score) { + atomic_add(score, &file_priv->ban_score); + + DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n", + ctx->name, score, + atomic_read(&file_priv->ban_score)); + } +} + +static bool context_mark_guilty(struct i915_gem_context *ctx) +{ + unsigned long prev_hang; + bool banned; + int i; + + atomic_inc(&ctx->guilty_count); + + /* Cool contexts are too cool to be banned! (Used for reset testing.) 
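+ * A context may opt out of banning, e.g. via the
+ * I915_CONTEXT_PARAM_BANNABLE context parameter; its guilty_count is
+ * still incremented above, it is just never marked banned here.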
*/ + if (!i915_gem_context_is_bannable(ctx)) + return false; + + /* Record the timestamp for the last N hangs */ + prev_hang = ctx->hang_timestamp[0]; + for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++) + ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1]; + ctx->hang_timestamp[i] = jiffies; + + /* If we have hung N+1 times in rapid succession, we ban the context! */ + banned = !i915_gem_context_is_recoverable(ctx); + if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES)) + banned = true; + if (banned) { + DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n", + ctx->name, atomic_read(&ctx->guilty_count)); + i915_gem_context_set_banned(ctx); + } + + if (!IS_ERR_OR_NULL(ctx->file_priv)) + client_mark_guilty(ctx->file_priv, ctx); + + return banned; +} + +static void context_mark_innocent(struct i915_gem_context *ctx) +{ + atomic_inc(&ctx->active_count); +} + +void __i915_request_reset(struct i915_request *rq, bool guilty) +{ + GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n", + rq->engine->name, + rq->fence.context, + rq->fence.seqno, + yesno(guilty)); + + GEM_BUG_ON(i915_request_completed(rq)); + + if (guilty) { + i915_request_skip(rq, -EIO); + if (context_mark_guilty(rq->gem_context)) + engine_skip_context(rq); + } else { + dma_fence_set_error(&rq->fence, -EAGAIN); + context_mark_innocent(rq->gem_context); + } +} + +static bool i915_in_reset(struct pci_dev *pdev) +{ + u8 gdrst; + + pci_read_config_byte(pdev, I915_GDRST, &gdrst); + return gdrst & GRDOM_RESET_STATUS; +} + +static int i915_do_reset(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + struct pci_dev *pdev = gt->i915->drm.pdev; + int err; + + /* Assert reset for at least 20 usec, and wait for acknowledgement. */ + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); + udelay(50); + err = wait_for_atomic(i915_in_reset(pdev), 50); + + /* Clear the reset request. 
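+ * (the udelay(50) after asserting GRDOM_RESET_ENABLE already exceeds
+ * the "at least 20 usec" requirement noted above.)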
*/ + pci_write_config_byte(pdev, I915_GDRST, 0); + udelay(50); + if (!err) + err = wait_for_atomic(!i915_in_reset(pdev), 50); + + return err; +} + +static bool g4x_reset_complete(struct pci_dev *pdev) +{ + u8 gdrst; + + pci_read_config_byte(pdev, I915_GDRST, &gdrst); + return (gdrst & GRDOM_RESET_ENABLE) == 0; +} + +static int g33_do_reset(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + struct pci_dev *pdev = gt->i915->drm.pdev; + + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); + return wait_for_atomic(g4x_reset_complete(pdev), 50); +} + +static int g4x_do_reset(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + struct pci_dev *pdev = gt->i915->drm.pdev; + struct intel_uncore *uncore = gt->uncore; + int ret; + + /* WaVcpClkGateDisableForMediaReset:ctg,elk */ + rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE); + intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D); + + pci_write_config_byte(pdev, I915_GDRST, + GRDOM_MEDIA | GRDOM_RESET_ENABLE); + ret = wait_for_atomic(g4x_reset_complete(pdev), 50); + if (ret) { + DRM_DEBUG_DRIVER("Wait for media reset failed\n"); + goto out; + } + + pci_write_config_byte(pdev, I915_GDRST, + GRDOM_RENDER | GRDOM_RESET_ENABLE); + ret = wait_for_atomic(g4x_reset_complete(pdev), 50); + if (ret) { + DRM_DEBUG_DRIVER("Wait for render reset failed\n"); + goto out; + } + +out: + pci_write_config_byte(pdev, I915_GDRST, 0); + + rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE); + intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D); + + return ret; +} + +static int ironlake_do_reset(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + struct intel_uncore *uncore = gt->uncore; + int ret; + + intel_uncore_write_fw(uncore, ILK_GDSR, + ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); + ret = __intel_wait_for_register_fw(uncore, ILK_GDSR, + ILK_GRDOM_RESET_ENABLE, 0, + 5000, 0, + NULL); + if (ret) { + DRM_DEBUG_DRIVER("Wait for render reset failed\n"); + goto out; + } + + intel_uncore_write_fw(uncore, ILK_GDSR, + ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); + ret = __intel_wait_for_register_fw(uncore, ILK_GDSR, + ILK_GRDOM_RESET_ENABLE, 0, + 5000, 0, + NULL); + if (ret) { + DRM_DEBUG_DRIVER("Wait for media reset failed\n"); + goto out; + } + +out: + intel_uncore_write_fw(uncore, ILK_GDSR, 0); + intel_uncore_posting_read_fw(uncore, ILK_GDSR); + return ret; +} + +/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */ +static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask) +{ + struct intel_uncore *uncore = gt->uncore; + int err; + + /* + * GEN6_GDRST is not in the gt power well, no need to check + * for fifo space for the write or forcewake the chip for + * the read + */ + intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask); + + /* Wait for the device to ack the reset requests */ + err = __intel_wait_for_register_fw(uncore, + GEN6_GDRST, hw_domain_mask, 0, + 500, 0, + NULL); + if (err) + DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n", + hw_domain_mask); + + return err; +} + +static int gen6_reset_engines(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + static const u32 hw_engine_mask[] = { + [RCS0] = GEN6_GRDOM_RENDER, + [BCS0] = GEN6_GRDOM_BLT, + [VCS0] = GEN6_GRDOM_MEDIA, + [VCS1] = GEN8_GRDOM_MEDIA2, + [VECS0] = GEN6_GRDOM_VECS, + }; + struct intel_engine_cs *engine; + u32 hw_mask; + + if (engine_mask == ALL_ENGINES) { + hw_mask = GEN6_GRDOM_FULL; + } else { 
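+ /*
+ * Translate the engine mask into hardware reset domains, e.g. a
+ * request covering only VCS0 and BCS0 becomes
+ * hw_mask == GEN6_GRDOM_MEDIA | GEN6_GRDOM_BLT before it is
+ * written to GEN6_GDRST by gen6_hw_domain_reset().
+ */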
+ intel_engine_mask_t tmp; + + hw_mask = 0; + for_each_engine_masked(engine, gt, engine_mask, tmp) { + GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); + hw_mask |= hw_engine_mask[engine->id]; + } + } + + return gen6_hw_domain_reset(gt, hw_mask); +} + +static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask) +{ + struct intel_uncore *uncore = engine->uncore; + u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access; + i915_reg_t sfc_forced_lock, sfc_forced_lock_ack; + u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit; + i915_reg_t sfc_usage; + u32 sfc_usage_bit; + u32 sfc_reset_bit; + int ret; + + switch (engine->class) { + case VIDEO_DECODE_CLASS: + if ((BIT(engine->instance) & vdbox_sfc_access) == 0) + return 0; + + sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine); + sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT; + + sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine); + sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT; + + sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine); + sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT; + sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance); + break; + + case VIDEO_ENHANCEMENT_CLASS: + sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine); + sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT; + + sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine); + sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT; + + sfc_usage = GEN11_VECS_SFC_USAGE(engine); + sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT; + sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance); + break; + + default: + return 0; + } + + /* + * If the engine is using a SFC, tell the engine that a software reset + * is going to happen. The engine will then try to force lock the SFC. + * If SFC ends up being locked to the engine we want to reset, we have + * to reset it as well (we will unlock it once the reset sequence is + * completed). + */ + if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)) + return 0; + + rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit); + + ret = __intel_wait_for_register_fw(uncore, + sfc_forced_lock_ack, + sfc_forced_lock_ack_bit, + sfc_forced_lock_ack_bit, + 1000, 0, NULL); + + /* Was the SFC released while we were trying to lock it? 
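+ *
+ * If it was, the engine no longer shares state with the SFC, so
+ * nothing extra needs resetting and sfc_reset_bit is not added to the
+ * caller's mask; the stale forced-lock request is cleared later by
+ * gen11_unlock_sfc().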
*/ + if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)) + return 0; + + if (ret) { + DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n"); + return ret; + } + + *hw_mask |= sfc_reset_bit; + return 0; +} + +static void gen11_unlock_sfc(struct intel_engine_cs *engine) +{ + struct intel_uncore *uncore = engine->uncore; + u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access; + i915_reg_t sfc_forced_lock; + u32 sfc_forced_lock_bit; + + switch (engine->class) { + case VIDEO_DECODE_CLASS: + if ((BIT(engine->instance) & vdbox_sfc_access) == 0) + return; + + sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine); + sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT; + break; + + case VIDEO_ENHANCEMENT_CLASS: + sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine); + sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT; + break; + + default: + return; + } + + rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit); +} + +static int gen11_reset_engines(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + static const u32 hw_engine_mask[] = { + [RCS0] = GEN11_GRDOM_RENDER, + [BCS0] = GEN11_GRDOM_BLT, + [VCS0] = GEN11_GRDOM_MEDIA, + [VCS1] = GEN11_GRDOM_MEDIA2, + [VCS2] = GEN11_GRDOM_MEDIA3, + [VCS3] = GEN11_GRDOM_MEDIA4, + [VECS0] = GEN11_GRDOM_VECS, + [VECS1] = GEN11_GRDOM_VECS2, + }; + struct intel_engine_cs *engine; + intel_engine_mask_t tmp; + u32 hw_mask; + int ret; + + if (engine_mask == ALL_ENGINES) { + hw_mask = GEN11_GRDOM_FULL; + } else { + hw_mask = 0; + for_each_engine_masked(engine, gt, engine_mask, tmp) { + GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); + hw_mask |= hw_engine_mask[engine->id]; + ret = gen11_lock_sfc(engine, &hw_mask); + if (ret) + goto sfc_unlock; + } + } + + ret = gen6_hw_domain_reset(gt, hw_mask); + +sfc_unlock: + /* + * We unlock the SFC based on the lock status and not the result of + * gen11_lock_sfc to make sure that we clean properly if something + * wrong happened during the lock (e.g. lock acquired after timeout + * expiration). 
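+ * Clearing an already-clear forced-lock bit in gen11_unlock_sfc() is
+ * harmless, so it is simply called for every engine in the mask.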
+ */ + if (engine_mask != ALL_ENGINES) + for_each_engine_masked(engine, gt, engine_mask, tmp) + gen11_unlock_sfc(engine); + + return ret; +} + +static int gen8_engine_reset_prepare(struct intel_engine_cs *engine) +{ + struct intel_uncore *uncore = engine->uncore; + const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base); + u32 request, mask, ack; + int ret; + + ack = intel_uncore_read_fw(uncore, reg); + if (ack & RESET_CTL_CAT_ERROR) { + /* + * For catastrophic errors, ready-for-reset sequence + * needs to be bypassed: HAS#396813 + */ + request = RESET_CTL_CAT_ERROR; + mask = RESET_CTL_CAT_ERROR; + + /* Catastrophic errors need to be cleared by HW */ + ack = 0; + } else if (!(ack & RESET_CTL_READY_TO_RESET)) { + request = RESET_CTL_REQUEST_RESET; + mask = RESET_CTL_READY_TO_RESET; + ack = RESET_CTL_READY_TO_RESET; + } else { + return 0; + } + + intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request)); + ret = __intel_wait_for_register_fw(uncore, reg, mask, ack, + 700, 0, NULL); + if (ret) + DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n", + engine->name, request, + intel_uncore_read_fw(uncore, reg)); + + return ret; +} + +static void gen8_engine_reset_cancel(struct intel_engine_cs *engine) +{ + intel_uncore_write_fw(engine->uncore, + RING_RESET_CTL(engine->mmio_base), + _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); +} + +static int gen8_reset_engines(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + struct intel_engine_cs *engine; + const bool reset_non_ready = retry >= 1; + intel_engine_mask_t tmp; + int ret; + + for_each_engine_masked(engine, gt, engine_mask, tmp) { + ret = gen8_engine_reset_prepare(engine); + if (ret && !reset_non_ready) + goto skip_reset; + + /* + * If this is not the first failed attempt to prepare, + * we decide to proceed anyway. + * + * By doing so we risk context corruption and with + * some gens (kbl), possible system hang if reset + * happens during active bb execution. + * + * We rather take context corruption instead of + * failed reset with a wedged driver/gpu. And + * active bb execution case should be covered by + * stop_engines() we have before the reset. + */ + } + + if (INTEL_GEN(gt->i915) >= 11) + ret = gen11_reset_engines(gt, engine_mask, retry); + else + ret = gen6_reset_engines(gt, engine_mask, retry); + +skip_reset: + for_each_engine_masked(engine, gt, engine_mask, tmp) + gen8_engine_reset_cancel(engine); + + return ret; +} + +static int mock_reset(struct intel_gt *gt, + intel_engine_mask_t mask, + unsigned int retry) +{ + return 0; +} + +typedef int (*reset_func)(struct intel_gt *, + intel_engine_mask_t engine_mask, + unsigned int retry); + +static reset_func intel_get_gpu_reset(const struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + + if (is_mock_gt(gt)) + return mock_reset; + else if (INTEL_GEN(i915) >= 8) + return gen8_reset_engines; + else if (INTEL_GEN(i915) >= 6) + return gen6_reset_engines; + else if (INTEL_GEN(i915) >= 5) + return ironlake_do_reset; + else if (IS_G4X(i915)) + return g4x_do_reset; + else if (IS_G33(i915) || IS_PINEVIEW(i915)) + return g33_do_reset; + else if (INTEL_GEN(i915) >= 3) + return i915_do_reset; + else + return NULL; +} + +int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) +{ + const int retries = engine_mask == ALL_ENGINES ? 
RESET_MAX_RETRIES : 1; + reset_func reset; + int ret = -ETIMEDOUT; + int retry; + + reset = intel_get_gpu_reset(gt); + if (!reset) + return -ENODEV; + + /* + * If the power well sleeps during the reset, the reset + * request may be dropped and never completes (causing -EIO). + */ + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) { + GEM_TRACE("engine_mask=%x\n", engine_mask); + preempt_disable(); + ret = reset(gt, engine_mask, retry); + preempt_enable(); + } + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); + + return ret; +} + +bool intel_has_gpu_reset(const struct intel_gt *gt) +{ + if (!i915_modparams.reset) + return NULL; + + return intel_get_gpu_reset(gt); +} + +bool intel_has_reset_engine(const struct intel_gt *gt) +{ + if (i915_modparams.reset < 2) + return false; + + return INTEL_INFO(gt->i915)->has_reset_engine; +} + +int intel_reset_guc(struct intel_gt *gt) +{ + u32 guc_domain = + INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; + int ret; + + GEM_BUG_ON(!HAS_GT_UC(gt->i915)); + + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + ret = gen6_hw_domain_reset(gt, guc_domain); + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); + + return ret; +} + +/* + * Ensure irq handler finishes, and not run again. + * Also return the active request so that we only search for it once. + */ +static void reset_prepare_engine(struct intel_engine_cs *engine) +{ + /* + * During the reset sequence, we must prevent the engine from + * entering RC6. As the context state is undefined until we restart + * the engine, if it does enter RC6 during the reset, the state + * written to the powercontext is undefined and so we may lose + * GPU state upon resume, i.e. fail to restart after a reset. + */ + intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); + engine->reset.prepare(engine); +} + +static void revoke_mmaps(struct intel_gt *gt) +{ + int i; + + for (i = 0; i < gt->ggtt->num_fences; i++) { + struct drm_vma_offset_node *node; + struct i915_vma *vma; + u64 vma_offset; + + vma = READ_ONCE(gt->ggtt->fence_regs[i].vma); + if (!vma) + continue; + + if (!i915_vma_has_userfault(vma)) + continue; + + GEM_BUG_ON(vma->fence != >->ggtt->fence_regs[i]); + node = &vma->obj->base.vma_node; + vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; + unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping, + drm_vma_node_offset_addr(node) + vma_offset, + vma->size, + 1); + } +} + +static intel_engine_mask_t reset_prepare(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + intel_engine_mask_t awake = 0; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) { + if (intel_engine_pm_get_if_awake(engine)) + awake |= engine->mask; + reset_prepare_engine(engine); + } + + intel_uc_reset_prepare(>->uc); + + return awake; +} + +static void gt_revoke(struct intel_gt *gt) +{ + revoke_mmaps(gt); +} + +static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err; + + /* + * Everything depends on having the GTT running, so we need to start + * there. 
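+ * If re-enabling the GGTT fails there is no point resetting the
+ * engines, so the error is returned to the caller straight away.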
+ */ + err = i915_ggtt_enable_hw(gt->i915); + if (err) + return err; + + for_each_engine(engine, gt, id) + __intel_engine_reset(engine, stalled_mask & engine->mask); + + i915_gem_restore_fences(gt->ggtt); + + return err; +} + +static void reset_finish_engine(struct intel_engine_cs *engine) +{ + engine->reset.finish(engine); + intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); + + intel_engine_breadcrumbs_irq(engine); +} + +static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) { + reset_finish_engine(engine); + if (awake & engine->mask) + intel_engine_pm_put(engine); + } +} + +static void nop_submit_request(struct i915_request *request) +{ + struct intel_engine_cs *engine = request->engine; + unsigned long flags; + + GEM_TRACE("%s fence %llx:%lld -> -EIO\n", + engine->name, request->fence.context, request->fence.seqno); + dma_fence_set_error(&request->fence, -EIO); + + spin_lock_irqsave(&engine->active.lock, flags); + __i915_request_submit(request); + i915_request_mark_complete(request); + spin_unlock_irqrestore(&engine->active.lock, flags); + + intel_engine_queue_breadcrumbs(engine); +} + +static void __intel_gt_set_wedged(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + intel_engine_mask_t awake; + enum intel_engine_id id; + + if (test_bit(I915_WEDGED, >->reset.flags)) + return; + + if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) { + struct drm_printer p = drm_debug_printer(__func__); + + for_each_engine(engine, gt, id) + intel_engine_dump(engine, &p, "%s\n", engine->name); + } + + GEM_TRACE("start\n"); + + /* + * First, stop submission to hw, but do not yet complete requests by + * rolling the global seqno forward (since this would complete requests + * for which we haven't set the fence error to EIO yet). + */ + awake = reset_prepare(gt); + + /* Even if the GPU reset fails, it should still stop the engines */ + if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + __intel_gt_reset(gt, ALL_ENGINES); + + for_each_engine(engine, gt, id) + engine->submit_request = nop_submit_request; + + /* + * Make sure no request can slip through without getting completed by + * either this call here to intel_engine_write_global_seqno, or the one + * in nop_submit_request. + */ + synchronize_rcu_expedited(); + set_bit(I915_WEDGED, >->reset.flags); + + /* Mark all executing requests as skipped */ + for_each_engine(engine, gt, id) + engine->cancel_requests(engine); + + reset_finish(gt, awake); + + GEM_TRACE("end\n"); +} + +void intel_gt_set_wedged(struct intel_gt *gt) +{ + intel_wakeref_t wakeref; + + mutex_lock(>->reset.mutex); + with_intel_runtime_pm(gt->uncore->rpm, wakeref) + __intel_gt_set_wedged(gt); + mutex_unlock(>->reset.mutex); +} + +static bool __intel_gt_unset_wedged(struct intel_gt *gt) +{ + struct intel_gt_timelines *timelines = >->timelines; + struct intel_timeline *tl; + unsigned long flags; + bool ok; + + if (!test_bit(I915_WEDGED, >->reset.flags)) + return true; + + /* Never fully initialised, recovery impossible */ + if (test_bit(I915_WEDGED_ON_INIT, >->reset.flags)) + return false; + + GEM_TRACE("start\n"); + + /* + * Before unwedging, make sure that all pending operations + * are flushed and errored out - we may have requests waiting upon + * third party fences. 
We marked all inflight requests as EIO, and + * every execbuf since returned EIO, for consistency we want all + * the currently pending requests to also be marked as EIO, which + * is done inside our nop_submit_request - and so we must wait. + * + * No more can be submitted until we reset the wedged bit. + */ + spin_lock_irqsave(&timelines->lock, flags); + list_for_each_entry(tl, &timelines->active_list, link) { + struct dma_fence *fence; + + fence = i915_active_fence_get(&tl->last_request); + if (!fence) + continue; + + spin_unlock_irqrestore(&timelines->lock, flags); + + /* + * All internal dependencies (i915_requests) will have + * been flushed by the set-wedge, but we may be stuck waiting + * for external fences. These should all be capped to 10s + * (I915_FENCE_TIMEOUT) so this wait should not be unbounded + * in the worst case. + */ + dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT); + dma_fence_put(fence); + + /* Restart iteration after droping lock */ + spin_lock_irqsave(&timelines->lock, flags); + tl = list_entry(&timelines->active_list, typeof(*tl), link); + } + spin_unlock_irqrestore(&timelines->lock, flags); + + /* We must reset pending GPU events before restoring our submission */ + ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */ + if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + ok = __intel_gt_reset(gt, ALL_ENGINES) == 0; + if (!ok) { + /* + * Warn CI about the unrecoverable wedged condition. + * Time for a reboot. + */ + add_taint_for_CI(TAINT_WARN); + return false; + } + + /* + * Undo nop_submit_request. We prevent all new i915 requests from + * being queued (by disallowing execbuf whilst wedged) so having + * waited for all active requests above, we know the system is idle + * and do not have to worry about a thread being inside + * engine->submit_request() as we swap over. So unlike installing + * the nop_submit_request on reset, we can do this from normal + * context and do not require stop_machine(). + */ + intel_engines_reset_default_submission(gt); + + GEM_TRACE("end\n"); + + smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ + clear_bit(I915_WEDGED, >->reset.flags); + + return true; +} + +bool intel_gt_unset_wedged(struct intel_gt *gt) +{ + bool result; + + mutex_lock(>->reset.mutex); + result = __intel_gt_unset_wedged(gt); + mutex_unlock(>->reset.mutex); + + return result; +} + +static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) +{ + int err, i; + + gt_revoke(gt); + + err = __intel_gt_reset(gt, ALL_ENGINES); + for (i = 0; err && i < RESET_MAX_RETRIES; i++) { + msleep(10 * (i + 1)); + err = __intel_gt_reset(gt, ALL_ENGINES); + } + if (err) + return err; + + return gt_reset(gt, stalled_mask); +} + +static int resume(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int ret; + + for_each_engine(engine, gt, id) { + ret = engine->resume(engine); + if (ret) + return ret; + } + + return 0; +} + +/** + * intel_gt_reset - reset chip after a hang + * @gt: #intel_gt to reset + * @stalled_mask: mask of the stalled engines with the guilty requests + * @reason: user error message for why we are resetting + * + * Reset the chip. Useful if a hang is detected. Marks the device as wedged + * on failure. 
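+ *
+ * An illustrative call, mirroring intel_gt_reset_global() further
+ * down (the reason string is made up for the example):
+ *
+ *	intel_gt_reset(gt, ALL_ENGINES, "simulated hang");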
+ * + * Procedure is fairly simple: + * - reset the chip using the reset reg + * - re-init context state + * - re-init hardware status page + * - re-init ring buffer + * - re-init interrupt state + * - re-init display + */ +void intel_gt_reset(struct intel_gt *gt, + intel_engine_mask_t stalled_mask, + const char *reason) +{ + intel_engine_mask_t awake; + int ret; + + GEM_TRACE("flags=%lx\n", gt->reset.flags); + + might_sleep(); + GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, >->reset.flags)); + mutex_lock(>->reset.mutex); + + /* Clear any previous failed attempts at recovery. Time to try again. */ + if (!__intel_gt_unset_wedged(gt)) + goto unlock; + + if (reason) + dev_notice(gt->i915->drm.dev, + "Resetting chip for %s\n", reason); + atomic_inc(>->i915->gpu_error.reset_count); + + awake = reset_prepare(gt); + + if (!intel_has_gpu_reset(gt)) { + if (i915_modparams.reset) + dev_err(gt->i915->drm.dev, "GPU reset not supported\n"); + else + DRM_DEBUG_DRIVER("GPU reset disabled\n"); + goto error; + } + + if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + intel_runtime_pm_disable_interrupts(gt->i915); + + if (do_reset(gt, stalled_mask)) { + dev_err(gt->i915->drm.dev, "Failed to reset chip\n"); + goto taint; + } + + if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + intel_runtime_pm_enable_interrupts(gt->i915); + + intel_overlay_reset(gt->i915); + + /* + * Next we need to restore the context, but we don't use those + * yet either... + * + * Ring buffer needs to be re-initialized in the KMS case, or if X + * was running at the time of the reset (i.e. we weren't VT + * switched away). + */ + ret = intel_gt_init_hw(gt); + if (ret) { + DRM_ERROR("Failed to initialise HW following reset (%d)\n", + ret); + goto taint; + } + + ret = resume(gt); + if (ret) + goto taint; + +finish: + reset_finish(gt, awake); +unlock: + mutex_unlock(>->reset.mutex); + return; + +taint: + /* + * History tells us that if we cannot reset the GPU now, we + * never will. This then impacts everything that is run + * subsequently. On failing the reset, we mark the driver + * as wedged, preventing further execution on the GPU. + * We also want to go one step further and add a taint to the + * kernel so that any subsequent faults can be traced back to + * this failure. This is important for CI, where if the + * GPU/driver fails we would like to reboot and restart testing + * rather than continue on into oblivion. For everyone else, + * the system should still plod along, but they have been warned! + */ + add_taint_for_CI(TAINT_WARN); +error: + __intel_gt_set_wedged(gt); + goto finish; +} + +static inline int intel_gt_reset_engine(struct intel_engine_cs *engine) +{ + return __intel_gt_reset(engine->gt, engine->mask); +} + +/** + * intel_engine_reset - reset GPU engine to recover from a hang + * @engine: engine to reset + * @msg: reason for GPU reset; or NULL for no dev_notice() + * + * Reset a specific GPU engine. Useful if a hang is detected. + * Returns zero on successful reset or otherwise an error code. 
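+ *
+ * A minimal illustrative caller, modelled on intel_gt_handle_error()
+ * below (the message is made up for the example):
+ *
+ *	if (intel_engine_reset(engine, "hang detected") == 0)
+ *		return; /* engine-only reset succeeded */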
+ * + * Procedure is: + * - identifies the request that caused the hang and it is dropped + * - reset engine (which will force the engine to idle) + * - re-init/configure engine + */ +int intel_engine_reset(struct intel_engine_cs *engine, const char *msg) +{ + struct intel_gt *gt = engine->gt; + int ret; + + GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags); + GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, >->reset.flags)); + + if (!intel_engine_pm_get_if_awake(engine)) + return 0; + + reset_prepare_engine(engine); + + if (msg) + dev_notice(engine->i915->drm.dev, + "Resetting %s for %s\n", engine->name, msg); + atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]); + + if (!engine->gt->uc.guc.execbuf_client) + ret = intel_gt_reset_engine(engine); + else + ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine); + if (ret) { + /* If we fail here, we expect to fallback to a global reset */ + DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n", + engine->gt->uc.guc.execbuf_client ? "GuC " : "", + engine->name, ret); + goto out; + } + + /* + * The request that caused the hang is stuck on elsp, we know the + * active request and can drop it, adjust head to skip the offending + * request to resume executing remaining requests in the queue. + */ + __intel_engine_reset(engine, true); + + /* + * The engine and its registers (and workarounds in case of render) + * have been reset to their default values. Follow the init_ring + * process to program RING_MODE, HWSP and re-enable submission. + */ + ret = engine->resume(engine); + +out: + intel_engine_cancel_stop_cs(engine); + reset_finish_engine(engine); + intel_engine_pm_put(engine); + return ret; +} + +static void intel_gt_reset_global(struct intel_gt *gt, + u32 engine_mask, + const char *reason) +{ + struct kobject *kobj = >->i915->drm.primary->kdev->kobj; + char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; + char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; + char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; + struct intel_wedge_me w; + + kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); + + DRM_DEBUG_DRIVER("resetting chip\n"); + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); + + /* Use a watchdog to ensure that our reset completes */ + intel_wedge_on_timeout(&w, gt, 5 * HZ) { + intel_prepare_reset(gt->i915); + + /* Flush everyone using a resource about to be clobbered */ + synchronize_srcu_expedited(>->reset.backoff_srcu); + + intel_gt_reset(gt, engine_mask, reason); + + intel_finish_reset(gt->i915); + } + + if (!test_bit(I915_WEDGED, >->reset.flags)) + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); +} + +/** + * intel_gt_handle_error - handle a gpu error + * @gt: the intel_gt + * @engine_mask: mask representing engines that are hung + * @flags: control flags + * @fmt: Error message format string + * + * Do some basic checking of register state at error time and + * dump it to the syslog. Also call i915_capture_error_state() to make + * sure we get a record and make it available in debugfs. Fire a uevent + * so userspace knows something bad happened (should trigger collection + * of a ring dump etc.). + */ +void intel_gt_handle_error(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned long flags, + const char *fmt, ...) 
+{ + struct intel_engine_cs *engine; + intel_wakeref_t wakeref; + intel_engine_mask_t tmp; + char error_msg[80]; + char *msg = NULL; + + if (fmt) { + va_list args; + + va_start(args, fmt); + vscnprintf(error_msg, sizeof(error_msg), fmt, args); + va_end(args); + + msg = error_msg; + } + + /* + * In most cases it's guaranteed that we get here with an RPM + * reference held, for example because there is a pending GPU + * request that won't finish until the reset is done. This + * isn't the case at least when we get here by doing a + * simulated reset via debugfs, so get an RPM reference. + */ + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + + engine_mask &= INTEL_INFO(gt->i915)->engine_mask; + + if (flags & I915_ERROR_CAPTURE) { + i915_capture_error_state(gt->i915, engine_mask, msg); + intel_gt_clear_error_registers(gt, engine_mask); + } + + /* + * Try engine reset when available. We fall back to full reset if + * single reset fails. + */ + if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) { + for_each_engine_masked(engine, gt, engine_mask, tmp) { + BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); + if (test_and_set_bit(I915_RESET_ENGINE + engine->id, + >->reset.flags)) + continue; + + if (intel_engine_reset(engine, msg) == 0) + engine_mask &= ~engine->mask; + + clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, + >->reset.flags); + } + } + + if (!engine_mask) + goto out; + + /* Full reset needs the mutex, stop any other user trying to do so. */ + if (test_and_set_bit(I915_RESET_BACKOFF, >->reset.flags)) { + wait_event(gt->reset.queue, + !test_bit(I915_RESET_BACKOFF, >->reset.flags)); + goto out; /* piggy-back on the other reset */ + } + + /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */ + synchronize_rcu_expedited(); + + /* Prevent any other reset-engine attempt. */ + for_each_engine(engine, gt, tmp) { + while (test_and_set_bit(I915_RESET_ENGINE + engine->id, + >->reset.flags)) + wait_on_bit(>->reset.flags, + I915_RESET_ENGINE + engine->id, + TASK_UNINTERRUPTIBLE); + } + + intel_gt_reset_global(gt, engine_mask, msg); + + for_each_engine(engine, gt, tmp) + clear_bit_unlock(I915_RESET_ENGINE + engine->id, + >->reset.flags); + clear_bit_unlock(I915_RESET_BACKOFF, >->reset.flags); + smp_mb__after_atomic(); + wake_up_all(>->reset.queue); + +out: + intel_runtime_pm_put(gt->uncore->rpm, wakeref); +} + +int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu) +{ + might_lock(>->reset.backoff_srcu); + might_sleep(); + + rcu_read_lock(); + while (test_bit(I915_RESET_BACKOFF, >->reset.flags)) { + rcu_read_unlock(); + + if (wait_event_interruptible(gt->reset.queue, + !test_bit(I915_RESET_BACKOFF, + >->reset.flags))) + return -EINTR; + + rcu_read_lock(); + } + *srcu = srcu_read_lock(>->reset.backoff_srcu); + rcu_read_unlock(); + + return 0; +} + +void intel_gt_reset_unlock(struct intel_gt *gt, int tag) +__releases(>->reset.backoff_srcu) +{ + srcu_read_unlock(>->reset.backoff_srcu, tag); +} + +int intel_gt_terminally_wedged(struct intel_gt *gt) +{ + might_sleep(); + + if (!intel_gt_is_wedged(gt)) + return 0; + + /* Reset still in progress? Maybe we will recover? */ + if (!test_bit(I915_RESET_BACKOFF, >->reset.flags)) + return -EIO; + + if (wait_event_interruptible(gt->reset.queue, + !test_bit(I915_RESET_BACKOFF, + >->reset.flags))) + return -EINTR; + + return intel_gt_is_wedged(gt) ? 
-EIO : 0; +} + +void intel_gt_set_wedged_on_init(struct intel_gt *gt) +{ + BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES > + I915_WEDGED_ON_INIT); + intel_gt_set_wedged(gt); + set_bit(I915_WEDGED_ON_INIT, >->reset.flags); +} + +void intel_gt_init_reset(struct intel_gt *gt) +{ + init_waitqueue_head(>->reset.queue); + mutex_init(>->reset.mutex); + init_srcu_struct(>->reset.backoff_srcu); +} + +void intel_gt_fini_reset(struct intel_gt *gt) +{ + cleanup_srcu_struct(>->reset.backoff_srcu); +} + +static void intel_wedge_me(struct work_struct *work) +{ + struct intel_wedge_me *w = container_of(work, typeof(*w), work.work); + + dev_err(w->gt->i915->drm.dev, + "%s timed out, cancelling all in-flight rendering.\n", + w->name); + intel_gt_set_wedged(w->gt); +} + +void __intel_init_wedge(struct intel_wedge_me *w, + struct intel_gt *gt, + long timeout, + const char *name) +{ + w->gt = gt; + w->name = name; + + INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me); + schedule_delayed_work(&w->work, timeout); +} + +void __intel_fini_wedge(struct intel_wedge_me *w) +{ + cancel_delayed_work_sync(&w->work); + destroy_delayed_work_on_stack(&w->work); + w->gt = NULL; +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_reset.c" +#include "selftest_hangcheck.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h new file mode 100644 index 000000000000..8e8d5f761166 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_reset.h @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2008-2018 Intel Corporation + */ + +#ifndef I915_RESET_H +#define I915_RESET_H + +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/srcu.h> + +#include "intel_engine_types.h" +#include "intel_reset_types.h" + +struct i915_request; +struct intel_engine_cs; +struct intel_gt; +struct intel_guc; + +void intel_gt_init_reset(struct intel_gt *gt); +void intel_gt_fini_reset(struct intel_gt *gt); + +__printf(4, 5) +void intel_gt_handle_error(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned long flags, + const char *fmt, ...); +#define I915_ERROR_CAPTURE BIT(0) + +void intel_gt_reset(struct intel_gt *gt, + intel_engine_mask_t stalled_mask, + const char *reason); +int intel_engine_reset(struct intel_engine_cs *engine, + const char *reason); + +void __i915_request_reset(struct i915_request *rq, bool guilty); + +int __must_check intel_gt_reset_trylock(struct intel_gt *gt, int *srcu); +void intel_gt_reset_unlock(struct intel_gt *gt, int tag); + +void intel_gt_set_wedged(struct intel_gt *gt); +bool intel_gt_unset_wedged(struct intel_gt *gt); +int intel_gt_terminally_wedged(struct intel_gt *gt); + +/* + * There's no unset_wedged_on_init paired with this one. + * Once we're wedged on init, there's no going back. 
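+ *
+ * (__intel_reset_failed() below asserts that I915_WEDGED_ON_INIT is
+ * only ever observed together with I915_WEDGED.)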
+ */ +void intel_gt_set_wedged_on_init(struct intel_gt *gt); + +int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask); + +int intel_reset_guc(struct intel_gt *gt); + +struct intel_wedge_me { + struct delayed_work work; + struct intel_gt *gt; + const char *name; +}; + +void __intel_init_wedge(struct intel_wedge_me *w, + struct intel_gt *gt, + long timeout, + const char *name); +void __intel_fini_wedge(struct intel_wedge_me *w); + +#define intel_wedge_on_timeout(W, GT, TIMEOUT) \ + for (__intel_init_wedge((W), (GT), (TIMEOUT), __func__); \ + (W)->gt; \ + __intel_fini_wedge((W))) + +static inline bool __intel_reset_failed(const struct intel_reset *reset) +{ + GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ? + !test_bit(I915_WEDGED, &reset->flags) : false); + + return unlikely(test_bit(I915_WEDGED, &reset->flags)); +} + +bool intel_has_gpu_reset(const struct intel_gt *gt); +bool intel_has_reset_engine(const struct intel_gt *gt); + +#endif /* I915_RESET_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h new file mode 100644 index 000000000000..f43bc3a0fe4f --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_RESET_TYPES_H_ +#define __INTEL_RESET_TYPES_H_ + +#include <linux/mutex.h> +#include <linux/wait.h> +#include <linux/srcu.h> + +struct intel_reset { + /** + * flags: Control various stages of the GPU reset + * + * #I915_RESET_BACKOFF - When we start a global reset, we need to + * serialise with any other users attempting to do the same, and + * any global resources that may be clobbered by the reset (such as + * FENCE registers). + * + * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to + * acquire the struct_mutex to reset an engine, we need an explicit + * flag to prevent two concurrent reset attempts in the same engine. + * As the number of engines continues to grow, allocate the flags from + * the most significant bits. + * + * #I915_WEDGED - If reset fails and we can no longer use the GPU, + * we set the #I915_WEDGED bit. Prior to command submission, e.g. + * i915_request_alloc(), this bit is checked and the sequence + * aborted (with -EIO reported to userspace) if set. + * + * #I915_WEDGED_ON_INIT - If we fail to initialize the GPU we can no + * longer use the GPU - similar to the #I915_WEDGED bit. The difference is + * in the way we're handling a "forced" unwedge (e.g. through debugfs), + * which is not allowed in case we failed to initialize. + */ + unsigned long flags; +#define I915_RESET_BACKOFF 0 +#define I915_RESET_MODESET 1 +#define I915_RESET_ENGINE 2 +#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 2) +#define I915_WEDGED (BITS_PER_LONG - 1) + + struct mutex mutex; /* serialises wedging/unwedging */ + + /** + * Waitqueue to signal when the reset has completed. Used by clients + * that wait for dev_priv->mm.wedged to settle.
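+ *
+ * intel_gt_reset_trylock() and intel_gt_terminally_wedged() sleep on
+ * this queue until I915_RESET_BACKOFF clears, and intel_gt_handle_error()
+ * uses it to piggy-back on a reset that is already in progress.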
+ */ + wait_queue_head_t queue; + + struct srcu_struct backoff_srcu; +}; + +#endif /* _INTEL_RESET_TYPES_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c new file mode 100644 index 000000000000..ece20504d240 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -0,0 +1,323 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "gem/i915_gem_object.h" +#include "i915_drv.h" +#include "i915_vma.h" +#include "intel_engine.h" +#include "intel_ring.h" +#include "intel_timeline.h" + +unsigned int intel_ring_update_space(struct intel_ring *ring) +{ + unsigned int space; + + space = __intel_ring_space(ring->head, ring->emit, ring->size); + + ring->space = space; + return space; +} + +int intel_ring_pin(struct intel_ring *ring) +{ + struct i915_vma *vma = ring->vma; + unsigned int flags; + void *addr; + int ret; + + if (atomic_fetch_inc(&ring->pin_count)) + return 0; + + flags = PIN_GLOBAL; + + /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ + flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + + if (vma->obj->stolen) + flags |= PIN_MAPPABLE; + else + flags |= PIN_HIGH; + + ret = i915_vma_pin(vma, 0, 0, flags); + if (unlikely(ret)) + goto err_unpin; + + if (i915_vma_is_map_and_fenceable(vma)) + addr = (void __force *)i915_vma_pin_iomap(vma); + else + addr = i915_gem_object_pin_map(vma->obj, + i915_coherent_map_type(vma->vm->i915)); + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); + goto err_ring; + } + + i915_vma_make_unshrinkable(vma); + + GEM_BUG_ON(ring->vaddr); + ring->vaddr = addr; + + return 0; + +err_ring: + i915_vma_unpin(vma); +err_unpin: + atomic_dec(&ring->pin_count); + return ret; +} + +void intel_ring_reset(struct intel_ring *ring, u32 tail) +{ + tail = intel_ring_wrap(ring, tail); + ring->tail = tail; + ring->head = tail; + ring->emit = tail; + intel_ring_update_space(ring); +} + +void intel_ring_unpin(struct intel_ring *ring) +{ + struct i915_vma *vma = ring->vma; + + if (!atomic_dec_and_test(&ring->pin_count)) + return; + + /* Discard any unused bytes beyond that submitted to hw. */ + intel_ring_reset(ring, ring->emit); + + i915_vma_unset_ggtt_write(vma); + if (i915_vma_is_map_and_fenceable(vma)) + i915_vma_unpin_iomap(vma); + else + i915_gem_object_unpin_map(vma->obj); + + GEM_BUG_ON(!ring->vaddr); + ring->vaddr = NULL; + + i915_vma_unpin(vma); + i915_vma_make_purgeable(vma); +} + +static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) +{ + struct i915_address_space *vm = &ggtt->vm; + struct drm_i915_private *i915 = vm->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = ERR_PTR(-ENODEV); + if (i915_ggtt_has_aperture(ggtt)) + obj = i915_gem_object_create_stolen(i915, size); + if (IS_ERR(obj)) + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + /* + * Mark ring buffers as read-only from GPU side (so no stray overwrites) + * if supported by the platform's GGTT. 
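+ *
+ * Note that only the GPU's view of the pages becomes read-only; the
+ * driver still fills the ring through the CPU mapping set up in
+ * intel_ring_pin().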
+ */ + if (vm->has_read_only) + i915_gem_object_set_readonly(obj); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) + goto err; + + return vma; + +err: + i915_gem_object_put(obj); + return vma; +} + +struct intel_ring * +intel_engine_create_ring(struct intel_engine_cs *engine, int size) +{ + struct drm_i915_private *i915 = engine->i915; + struct intel_ring *ring; + struct i915_vma *vma; + + GEM_BUG_ON(!is_power_of_2(size)); + GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES); + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + return ERR_PTR(-ENOMEM); + + kref_init(&ring->ref); + ring->size = size; + + /* + * Workaround an erratum on the i830 which causes a hang if + * the TAIL pointer points to within the last 2 cachelines + * of the buffer. + */ + ring->effective_size = size; + if (IS_I830(i915) || IS_I845G(i915)) + ring->effective_size -= 2 * CACHELINE_BYTES; + + intel_ring_update_space(ring); + + vma = create_ring_vma(engine->gt->ggtt, size); + if (IS_ERR(vma)) { + kfree(ring); + return ERR_CAST(vma); + } + ring->vma = vma; + + return ring; +} + +void intel_ring_free(struct kref *ref) +{ + struct intel_ring *ring = container_of(ref, typeof(*ring), ref); + + i915_vma_put(ring->vma); + kfree(ring); +} + +static noinline int +wait_for_space(struct intel_ring *ring, + struct intel_timeline *tl, + unsigned int bytes) +{ + struct i915_request *target; + long timeout; + + if (intel_ring_update_space(ring) >= bytes) + return 0; + + GEM_BUG_ON(list_empty(&tl->requests)); + list_for_each_entry(target, &tl->requests, link) { + if (target->ring != ring) + continue; + + /* Would completion of this request free enough space? */ + if (bytes <= __intel_ring_space(target->postfix, + ring->emit, ring->size)) + break; + } + + if (GEM_WARN_ON(&target->link == &tl->requests)) + return -ENOSPC; + + timeout = i915_request_wait(target, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); + if (timeout < 0) + return timeout; + + i915_request_retire_upto(target); + + intel_ring_update_space(ring); + GEM_BUG_ON(ring->space < bytes); + return 0; +} + +u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords) +{ + struct intel_ring *ring = rq->ring; + const unsigned int remain_usable = ring->effective_size - ring->emit; + const unsigned int bytes = num_dwords * sizeof(u32); + unsigned int need_wrap = 0; + unsigned int total_bytes; + u32 *cs; + + /* Packets must be qword aligned. */ + GEM_BUG_ON(num_dwords & 1); + + total_bytes = bytes + rq->reserved_space; + GEM_BUG_ON(total_bytes > ring->effective_size); + + if (unlikely(total_bytes > remain_usable)) { + const int remain_actual = ring->size - ring->emit; + + if (bytes > remain_usable) { + /* + * Not enough space for the basic request. So need to + * flush out the remainder and then wait for + * base + reserved. + */ + total_bytes += remain_actual; + need_wrap = remain_actual | 1; + } else { + /* + * The base request will fit but the reserved space + * falls off the end. So we don't need an immediate + * wrap and only need to effectively wait for the + * reserved size from the start of ringbuffer. + */ + total_bytes = rq->reserved_space + remain_actual; + } + } + + if (unlikely(total_bytes > ring->space)) { + int ret; + + /* + * Space is reserved in the ringbuffer for finalising the + * request, as that cannot be allowed to fail. During request + * finalisation, reserved_space is set to 0 to stop the + * overallocation and the assumption is that then we never need + * to wait (which has the risk of failing with EINTR). 
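+ *
+ * As a worked example: the legacy ringbuffer backend later in this
+ * series reserves LEGACY_REQUEST_SIZE (200) bytes per request, so a
+ * caller asking for N dwords must find 4 * N + 200 bytes of free space
+ * before it may proceed; the reserve is then consumed by the final
+ * breadcrumb emission, which must not fail.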
+ * + * See also i915_request_alloc() and i915_request_add(). + */ + GEM_BUG_ON(!rq->reserved_space); + + ret = wait_for_space(ring, + i915_request_timeline(rq), + total_bytes); + if (unlikely(ret)) + return ERR_PTR(ret); + } + + if (unlikely(need_wrap)) { + need_wrap &= ~1; + GEM_BUG_ON(need_wrap > ring->space); + GEM_BUG_ON(ring->emit + need_wrap > ring->size); + GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64))); + + /* Fill the tail with MI_NOOP */ + memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64)); + ring->space -= need_wrap; + ring->emit = 0; + } + + GEM_BUG_ON(ring->emit > ring->size - bytes); + GEM_BUG_ON(ring->space < bytes); + cs = ring->vaddr + ring->emit; + GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs))); + ring->emit += bytes; + ring->space -= bytes; + + return cs; +} + +/* Align the ring tail to a cacheline boundary */ +int intel_ring_cacheline_align(struct i915_request *rq) +{ + int num_dwords; + void *cs; + + num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32); + if (num_dwords == 0) + return 0; + + num_dwords = CACHELINE_DWORDS - num_dwords; + GEM_BUG_ON(num_dwords & 1); + + cs = intel_ring_begin(rq, num_dwords); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2); + intel_ring_advance(rq, cs + num_dwords); + + GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1)); + return 0; +} diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h new file mode 100644 index 000000000000..ea2839d9e044 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ring.h @@ -0,0 +1,131 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RING_H +#define INTEL_RING_H + +#include "i915_gem.h" /* GEM_BUG_ON */ +#include "i915_request.h" +#include "intel_ring_types.h" + +struct intel_engine_cs; + +struct intel_ring * +intel_engine_create_ring(struct intel_engine_cs *engine, int size); + +u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords); +int intel_ring_cacheline_align(struct i915_request *rq); + +unsigned int intel_ring_update_space(struct intel_ring *ring); + +int intel_ring_pin(struct intel_ring *ring); +void intel_ring_unpin(struct intel_ring *ring); +void intel_ring_reset(struct intel_ring *ring, u32 tail); + +void intel_ring_free(struct kref *ref); + +static inline struct intel_ring *intel_ring_get(struct intel_ring *ring) +{ + kref_get(&ring->ref); + return ring; +} + +static inline void intel_ring_put(struct intel_ring *ring) +{ + kref_put(&ring->ref, intel_ring_free); +} + +static inline void intel_ring_advance(struct i915_request *rq, u32 *cs) +{ + /* Dummy function. + * + * This serves as a placeholder in the code so that the reader + * can compare against the preceding intel_ring_begin() and + * check that the number of dwords emitted matches the space + * reserved for the command packet (i.e. the value passed to + * intel_ring_begin()). 
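+ *
+ * As an illustrative sketch (mirroring callers such as the two-dword
+ * MI_FLUSH emitters later in this series), a typical sequence is:
+ *
+ *   cs = intel_ring_begin(rq, 2);
+ *   if (IS_ERR(cs))
+ *           return PTR_ERR(cs);
+ *   *cs++ = MI_FLUSH;
+ *   *cs++ = MI_NOOP;
+ *   intel_ring_advance(rq, cs);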
+ */ + GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs); +} + +static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos) +{ + return pos & (ring->size - 1); +} + +static inline bool +intel_ring_offset_valid(const struct intel_ring *ring, + unsigned int pos) +{ + if (pos & -ring->size) /* must be strictly within the ring */ + return false; + + if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */ + return false; + + return true; +} + +static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr) +{ + /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */ + u32 offset = addr - rq->ring->vaddr; + GEM_BUG_ON(offset > rq->ring->size); + return intel_ring_wrap(rq->ring, offset); +} + +static inline void +assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) +{ + GEM_BUG_ON(!intel_ring_offset_valid(ring, tail)); + + /* + * "Ring Buffer Use" + * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 + * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5 + * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5 + * "If the Ring Buffer Head Pointer and the Tail Pointer are on the + * same cacheline, the Head Pointer must not be greater than the Tail + * Pointer." + * + * We use ring->head as the last known location of the actual RING_HEAD, + * it may have advanced but in the worst case it is equally the same + * as ring->head and so we should never program RING_TAIL to advance + * into the same cacheline as ring->head. + */ +#define cacheline(a) round_down(a, CACHELINE_BYTES) + GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) && + tail < ring->head); +#undef cacheline +} + +static inline unsigned int +intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) +{ + /* Whilst writes to the tail are strictly order, there is no + * serialisation between readers and the writers. The tail may be + * read by i915_request_retire() just as it is being updated + * by execlists, as although the breadcrumb is complete, the context + * switch hasn't been seen. + */ + assert_ring_tail_valid(ring, tail); + ring->tail = tail; + return tail; +} + +static inline unsigned int +__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size) +{ + /* + * "If the Ring Buffer Head Pointer and the Tail Pointer are on the + * same cacheline, the Head Pointer must not be greater than the Tail + * Pointer." + */ + GEM_BUG_ON(!is_power_of_2(size)); + return (head - tail - CACHELINE_BYTES) & (size - 1); +} + +#endif /* INTEL_RING_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c new file mode 100644 index 000000000000..a47d5a7c32c9 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -0,0 +1,2053 @@ +/* + * Copyright © 2008-2010 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt <[email protected]> + * Zou Nan hai <[email protected]> + * Xiang Hai hao<[email protected]> + * + */ + +#include <linux/log2.h> + +#include <drm/i915_drm.h> + +#include "gem/i915_gem_context.h" + +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_context.h" +#include "intel_gt.h" +#include "intel_gt_irq.h" +#include "intel_gt_pm_irq.h" +#include "intel_reset.h" +#include "intel_ring.h" +#include "intel_workarounds.h" + +/* Rough estimate of the typical request size, performing a flush, + * set-context and then emitting the batch. + */ +#define LEGACY_REQUEST_SIZE 200 + +static int +gen2_render_ring_flush(struct i915_request *rq, u32 mode) +{ + unsigned int num_store_dw; + u32 cmd, *cs; + + cmd = MI_FLUSH; + num_store_dw = 0; + if (mode & EMIT_INVALIDATE) + cmd |= MI_READ_FLUSH; + if (mode & EMIT_FLUSH) + num_store_dw = 4; + + cs = intel_ring_begin(rq, 2 + 3 * num_store_dw); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = cmd; + while (num_store_dw--) { + *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT); + *cs++ = 0; + } + *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH; + + intel_ring_advance(rq, cs); + + return 0; +} + +static int +gen4_render_ring_flush(struct i915_request *rq, u32 mode) +{ + u32 cmd, *cs; + int i; + + /* + * read/write caches: + * + * I915_GEM_DOMAIN_RENDER is always invalidated, but is + * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is + * also flushed at 2d versus 3d pipeline switches. + * + * read-only caches: + * + * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if + * MI_READ_FLUSH is set, and is always flushed on 965. + * + * I915_GEM_DOMAIN_COMMAND may not exist? + * + * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is + * invalidated when MI_EXE_FLUSH is set. + * + * I915_GEM_DOMAIN_VERTEX, which exists on 965, is + * invalidated with every MI_FLUSH. + * + * TLBs: + * + * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND + * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and + * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER + * are flushed at any MI_FLUSH. + */ + + cmd = MI_FLUSH; + if (mode & EMIT_INVALIDATE) { + cmd |= MI_EXE_FLUSH; + if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5)) + cmd |= MI_INVALIDATE_ISP; + } + + i = 2; + if (mode & EMIT_INVALIDATE) + i += 20; + + cs = intel_ring_begin(rq, i); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = cmd; + + /* + * A random delay to let the CS invalidate take effect? Without this + * delay, the GPU relocation path fails as the CS does not see + * the updated contents. Just as important, if we apply the flushes + * to the EMIT_FLUSH branch (i.e. immediately after the relocation + * write and before the invalidate on the next batch), the relocations + * still fail. This implies that is a delay following invalidation + * that is required to reset the caches as opposed to a delay to + * ensure the memory is written. 
+ */ + if (mode & EMIT_INVALIDATE) { + *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT) | + PIPE_CONTROL_GLOBAL_GTT; + *cs++ = 0; + *cs++ = 0; + + for (i = 0; i < 12; i++) + *cs++ = MI_FLUSH; + + *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT) | + PIPE_CONTROL_GLOBAL_GTT; + *cs++ = 0; + *cs++ = 0; + } + + *cs++ = cmd; + + intel_ring_advance(rq, cs); + + return 0; +} + +/* + * Emits a PIPE_CONTROL with a non-zero post-sync operation, for + * implementing two workarounds on gen6. From section 1.4.7.1 + * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1: + * + * [DevSNB-C+{W/A}] Before any depth stall flush (including those + * produced by non-pipelined state commands), software needs to first + * send a PIPE_CONTROL with no bits set except Post-Sync Operation != + * 0. + * + * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable + * =1, a PIPE_CONTROL with any non-zero post-sync-op is required. + * + * And the workaround for these two requires this workaround first: + * + * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent + * BEFORE the pipe-control with a post-sync op and no write-cache + * flushes. + * + * And this last workaround is tricky because of the requirements on + * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM + * volume 2 part 1: + * + * "1 of the following must also be set: + * - Render Target Cache Flush Enable ([12] of DW1) + * - Depth Cache Flush Enable ([0] of DW1) + * - Stall at Pixel Scoreboard ([1] of DW1) + * - Depth Stall ([13] of DW1) + * - Post-Sync Operation ([13] of DW1) + * - Notify Enable ([8] of DW1)" + * + * The cache flushes require the workaround flush that triggered this + * one, so we can't use it. Depth stall would trigger the same. + * Post-sync nonzero is what triggered this second workaround, so we + * can't use that one either. Notify enable is IRQs, which aren't + * really our business. That leaves only stall at scoreboard. + */ +static int +gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) +{ + u32 scratch_addr = + intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); + u32 *cs; + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = GFX_OP_PIPE_CONTROL(5); + *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; + *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = 0; /* low dword */ + *cs++ = 0; /* high dword */ + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = GFX_OP_PIPE_CONTROL(5); + *cs++ = PIPE_CONTROL_QW_WRITE; + *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = 0; + *cs++ = 0; + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + + return 0; +} + +static int +gen6_render_ring_flush(struct i915_request *rq, u32 mode) +{ + u32 scratch_addr = + intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); + u32 *cs, flags = 0; + int ret; + + /* Force SNB workarounds for PIPE_CONTROL flushes */ + ret = gen6_emit_post_sync_nonzero_flush(rq); + if (ret) + return ret; + + /* Just flush everything. Experiments have shown that reducing the + * number of bits based on the write domains has little performance + * impact. 
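+ *
+ * The EMIT_FLUSH and EMIT_INVALIDATE bits requested by the caller are
+ * simply translated into the PIPE_CONTROL flags selected below.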
+ */ + if (mode & EMIT_FLUSH) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + /* + * Ensure that any following seqno writes only happen + * when the render cache is indeed flushed. + */ + flags |= PIPE_CONTROL_CS_STALL; + } + if (mode & EMIT_INVALIDATE) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + /* + * TLB invalidate requires a post-sync write. + */ + flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; + } + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = flags; + *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = 0; + intel_ring_advance(rq, cs); + + return 0; +} + +static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) +{ + /* First we do the gen6_emit_post_sync_nonzero_flush w/a */ + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; + *cs++ = 0; + *cs++ = 0; + + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = PIPE_CONTROL_QW_WRITE; + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT) | + PIPE_CONTROL_GLOBAL_GTT; + *cs++ = 0; + + /* Finally we can flush and with it emit the breadcrumb */ + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_DC_FLUSH_ENABLE | + PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_CS_STALL); + *cs++ = i915_request_active_timeline(rq)->hwsp_offset | + PIPE_CONTROL_GLOBAL_GTT; + *cs++ = rq->fence.seqno; + + *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_NOOP; + + rq->tail = intel_ring_offset(rq, cs); + assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; +} + +static int +gen7_render_ring_cs_stall_wa(struct i915_request *rq) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; + *cs++ = 0; + *cs++ = 0; + intel_ring_advance(rq, cs); + + return 0; +} + +static int +gen7_render_ring_flush(struct i915_request *rq, u32 mode) +{ + u32 scratch_addr = + intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); + u32 *cs, flags = 0; + + /* + * Ensure that any following seqno writes only happen when the render + * cache is indeed flushed. + * + * Workaround: 4th PIPE_CONTROL command (except the ones with only + * read-cache invalidate bits set) must have the CS_STALL bit set. We + * don't try to be clever and just set it unconditionally. + */ + flags |= PIPE_CONTROL_CS_STALL; + + /* Just flush everything. Experiments have shown that reducing the + * number of bits based on the write domains has little performance + * impact. 
+ */ + if (mode & EMIT_FLUSH) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; + flags |= PIPE_CONTROL_FLUSH_ENABLE; + } + if (mode & EMIT_INVALIDATE) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR; + /* + * TLB invalidate requires a post-sync write. + */ + flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + + flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD; + + /* Workaround: we must issue a pipe_control with CS-stall bit + * set before a pipe_control command that has the state cache + * invalidate bit set. */ + gen7_render_ring_cs_stall_wa(rq); + } + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = flags; + *cs++ = scratch_addr; + *cs++ = 0; + intel_ring_advance(rq, cs); + + return 0; +} + +static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) +{ + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_DC_FLUSH_ENABLE | + PIPE_CONTROL_FLUSH_ENABLE | + PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL); + *cs++ = i915_request_active_timeline(rq)->hwsp_offset; + *cs++ = rq->fence.seqno; + + *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_NOOP; + + rq->tail = intel_ring_offset(rq, cs); + assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; +} + +static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) +{ + GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + + *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = rq->fence.seqno; + + *cs++ = MI_USER_INTERRUPT; + + rq->tail = intel_ring_offset(rq, cs); + assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; +} + +#define GEN7_XCS_WA 32 +static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) +{ + int i; + + GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + + *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = rq->fence.seqno; + + for (i = 0; i < GEN7_XCS_WA; i++) { + *cs++ = MI_STORE_DWORD_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR; + *cs++ = rq->fence.seqno; + } + + *cs++ = MI_FLUSH_DW; + *cs++ = 0; + *cs++ = 0; + + *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_NOOP; + + rq->tail = intel_ring_offset(rq, cs); + assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; +} +#undef GEN7_XCS_WA + +static void set_hwstam(struct intel_engine_cs *engine, u32 mask) +{ + /* + * Keep the render interrupt unmasked as this papers over + * lost interrupts following a reset. 
+ */ + if (engine->class == RENDER_CLASS) { + if (INTEL_GEN(engine->i915) >= 6) + mask &= ~BIT(0); + else + mask &= ~I915_USER_INTERRUPT; + } + + intel_engine_set_hwsp_writemask(engine, mask); +} + +static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys) +{ + struct drm_i915_private *dev_priv = engine->i915; + u32 addr; + + addr = lower_32_bits(phys); + if (INTEL_GEN(dev_priv) >= 4) + addr |= (phys >> 28) & 0xf0; + + I915_WRITE(HWS_PGA, addr); +} + +static struct page *status_page(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_object *obj = engine->status_page.vma->obj; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + return sg_page(obj->mm.pages->sgl); +} + +static void ring_setup_phys_status_page(struct intel_engine_cs *engine) +{ + set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine)))); + set_hwstam(engine, ~0u); +} + +static void set_hwsp(struct intel_engine_cs *engine, u32 offset) +{ + struct drm_i915_private *dev_priv = engine->i915; + i915_reg_t hwsp; + + /* + * The ring status page addresses are no longer next to the rest of + * the ring registers as of gen7. + */ + if (IS_GEN(dev_priv, 7)) { + switch (engine->id) { + /* + * No more rings exist on Gen7. Default case is only to shut up + * gcc switch check warning. + */ + default: + GEM_BUG_ON(engine->id); + /* fallthrough */ + case RCS0: + hwsp = RENDER_HWS_PGA_GEN7; + break; + case BCS0: + hwsp = BLT_HWS_PGA_GEN7; + break; + case VCS0: + hwsp = BSD_HWS_PGA_GEN7; + break; + case VECS0: + hwsp = VEBOX_HWS_PGA_GEN7; + break; + } + } else if (IS_GEN(dev_priv, 6)) { + hwsp = RING_HWS_PGA_GEN6(engine->mmio_base); + } else { + hwsp = RING_HWS_PGA(engine->mmio_base); + } + + I915_WRITE(hwsp, offset); + POSTING_READ(hwsp); +} + +static void flush_cs_tlb(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + if (!IS_GEN_RANGE(dev_priv, 6, 7)) + return; + + /* ring should be idle before issuing a sync flush*/ + WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); + + ENGINE_WRITE(engine, RING_INSTPM, + _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | + INSTPM_SYNC_FLUSH)); + if (intel_wait_for_register(engine->uncore, + RING_INSTPM(engine->mmio_base), + INSTPM_SYNC_FLUSH, 0, + 1000)) + DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", + engine->name); +} + +static void ring_setup_status_page(struct intel_engine_cs *engine) +{ + set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma)); + set_hwstam(engine, ~0u); + + flush_cs_tlb(engine); +} + +static bool stop_ring(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + if (INTEL_GEN(dev_priv) > 2) { + ENGINE_WRITE(engine, + RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING)); + if (intel_wait_for_register(engine->uncore, + RING_MI_MODE(engine->mmio_base), + MODE_IDLE, + MODE_IDLE, + 1000)) { + DRM_ERROR("%s : timed out trying to stop ring\n", + engine->name); + + /* + * Sometimes we observe that the idle flag is not + * set even though the ring is empty. So double + * check before giving up. 
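+ *
+ * If RING_HEAD already matches RING_TAIL the ring really is empty, so
+ * the engine is treated as stopped even though MODE_IDLE never became
+ * visible.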
+ */ + if (ENGINE_READ(engine, RING_HEAD) != + ENGINE_READ(engine, RING_TAIL)) + return false; + } + } + + ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL)); + + ENGINE_WRITE(engine, RING_HEAD, 0); + ENGINE_WRITE(engine, RING_TAIL, 0); + + /* The ring must be empty before it is disabled */ + ENGINE_WRITE(engine, RING_CTL, 0); + + return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0; +} + +static int xcs_resume(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + struct intel_ring *ring = engine->legacy.ring; + int ret = 0; + + GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n", + engine->name, ring->head, ring->tail); + + intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); + + /* WaClearRingBufHeadRegAtInit:ctg,elk */ + if (!stop_ring(engine)) { + /* G45 ring initialization often fails to reset head to zero */ + DRM_DEBUG_DRIVER("%s head not reset to zero " + "ctl %08x head %08x tail %08x start %08x\n", + engine->name, + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_HEAD), + ENGINE_READ(engine, RING_TAIL), + ENGINE_READ(engine, RING_START)); + + if (!stop_ring(engine)) { + DRM_ERROR("failed to set %s head to zero " + "ctl %08x head %08x tail %08x start %08x\n", + engine->name, + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_HEAD), + ENGINE_READ(engine, RING_TAIL), + ENGINE_READ(engine, RING_START)); + ret = -EIO; + goto out; + } + } + + if (HWS_NEEDS_PHYSICAL(dev_priv)) + ring_setup_phys_status_page(engine); + else + ring_setup_status_page(engine); + + intel_engine_reset_breadcrumbs(engine); + + /* Enforce ordering by reading HEAD register back */ + ENGINE_POSTING_READ(engine, RING_HEAD); + + /* + * Initialize the ring. This must happen _after_ we've cleared the ring + * registers with the above sequence (the readback of the HEAD registers + * also enforces ordering), otherwise the hw might lose the new ring + * register values. + */ + ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma)); + + /* Check that the ring offsets point within the ring! */ + GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); + GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); + intel_ring_update_space(ring); + + /* First wake the ring up to an empty/idle ring */ + ENGINE_WRITE(engine, RING_HEAD, ring->head); + ENGINE_WRITE(engine, RING_TAIL, ring->head); + ENGINE_POSTING_READ(engine, RING_TAIL); + + ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID); + + /* If the head is still not zero, the ring is dead */ + if (intel_wait_for_register(engine->uncore, + RING_CTL(engine->mmio_base), + RING_VALID, RING_VALID, + 50)) { + DRM_ERROR("%s initialization failed " + "ctl %08x (valid? 
%d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n", + engine->name, + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_CTL) & RING_VALID, + ENGINE_READ(engine, RING_HEAD), ring->head, + ENGINE_READ(engine, RING_TAIL), ring->tail, + ENGINE_READ(engine, RING_START), + i915_ggtt_offset(ring->vma)); + ret = -EIO; + goto out; + } + + if (INTEL_GEN(dev_priv) > 2) + ENGINE_WRITE(engine, + RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); + + /* Now awake, let it get started */ + if (ring->tail != ring->head) { + ENGINE_WRITE(engine, RING_TAIL, ring->tail); + ENGINE_POSTING_READ(engine, RING_TAIL); + } + + /* Papering over lost _interrupts_ immediately following the restart */ + intel_engine_queue_breadcrumbs(engine); +out: + intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); + + return ret; +} + +static void reset_prepare(struct intel_engine_cs *engine) +{ + struct intel_uncore *uncore = engine->uncore; + const u32 base = engine->mmio_base; + + /* + * We stop engines, otherwise we might get failed reset and a + * dead gpu (on elk). Also as modern gpu as kbl can suffer + * from system hang if batchbuffer is progressing when + * the reset is issued, regardless of READY_TO_RESET ack. + * Thus assume it is best to stop engines on all gens + * where we have a gpu reset. + * + * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) + * + * WaMediaResetMainRingCleanup:ctg,elk (presumably) + * + * FIXME: Wa for more modern gens needs to be validated + */ + GEM_TRACE("%s\n", engine->name); + + if (intel_engine_stop_cs(engine)) + GEM_TRACE("%s: timed out on STOP_RING\n", engine->name); + + intel_uncore_write_fw(uncore, + RING_HEAD(base), + intel_uncore_read_fw(uncore, RING_TAIL(base))); + intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */ + + intel_uncore_write_fw(uncore, RING_HEAD(base), 0); + intel_uncore_write_fw(uncore, RING_TAIL(base), 0); + intel_uncore_posting_read_fw(uncore, RING_TAIL(base)); + + /* The ring must be empty before it is disabled */ + intel_uncore_write_fw(uncore, RING_CTL(base), 0); + + /* Check acts as a post */ + if (intel_uncore_read_fw(uncore, RING_HEAD(base))) + GEM_TRACE("%s: ring head [%x] not parked\n", + engine->name, + intel_uncore_read_fw(uncore, RING_HEAD(base))); +} + +static void reset_ring(struct intel_engine_cs *engine, bool stalled) +{ + struct i915_request *pos, *rq; + unsigned long flags; + u32 head; + + rq = NULL; + spin_lock_irqsave(&engine->active.lock, flags); + list_for_each_entry(pos, &engine->active.requests, sched.link) { + if (!i915_request_completed(pos)) { + rq = pos; + break; + } + } + + /* + * The guilty request will get skipped on a hung engine. + * + * Users of client default contexts do not rely on logical + * state preserved between batches so it is safe to execute + * queued requests following the hang. Non default contexts + * rely on preserved state, so skipping a batch loses the + * evolution of the state and it needs to be considered corrupted. + * Executing more queued batches on top of corrupted state is + * risky. But we take the risk by trying to advance through + * the queued requests in order to make the client behaviour + * more predictable around resets, by not throwing away random + * amount of batches it has prepared for execution. Sophisticated + * clients can use gem_reset_stats_ioctl and dma fence status + * (exported via sync_file info ioctl on explicit fences) to observe + * when it loses the context state and should rebuild accordingly. 
+ * + * The context ban, and ultimately the client ban, mechanism are safety + * valves if client submission ends up resulting in nothing more than + * subsequent hangs. + */ + + if (rq) { + /* + * Try to restore the logical GPU state to match the + * continuation of the request queue. If we skip the + * context/PD restore, then the next request may try to execute + * assuming that its context is valid and loaded on the GPU and + * so may try to access invalid memory, prompting repeated GPU + * hangs. + * + * If the request was guilty, we still restore the logical + * state in case the next request requires it (e.g. the + * aliasing ppgtt), but skip over the hung batch. + * + * If the request was innocent, we try to replay the request + * with the restored context. + */ + __i915_request_reset(rq, stalled); + + GEM_BUG_ON(rq->ring != engine->legacy.ring); + head = rq->head; + } else { + head = engine->legacy.ring->tail; + } + engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head); + + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +static void reset_finish(struct intel_engine_cs *engine) +{ +} + +static int rcs_resume(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + /* + * Disable CONSTANT_BUFFER before it is loaded from the context + * image. For as it is loaded, it is executed and the stored + * address may no longer be valid, leading to a GPU hang. + * + * This imposes the requirement that userspace reload their + * CONSTANT_BUFFER on every batch, fortunately a requirement + * they are already accustomed to from before contexts were + * enabled. + */ + if (IS_GEN(dev_priv, 4)) + I915_WRITE(ECOSKPD, + _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE)); + + /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ + if (IS_GEN_RANGE(dev_priv, 4, 6)) + I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); + + /* We need to disable the AsyncFlip performance optimisations in order + * to use MI_WAIT_FOR_EVENT within the CS. It should already be + * programmed to '1' on all products. + * + * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv + */ + if (IS_GEN_RANGE(dev_priv, 6, 7)) + I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); + + /* Required for the hardware to program scanline values for waiting */ + /* WaEnableFlushTlbInvalidationMode:snb */ + if (IS_GEN(dev_priv, 6)) + I915_WRITE(GFX_MODE, + _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); + + /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ + if (IS_GEN(dev_priv, 7)) + I915_WRITE(GFX_MODE_GEN7, + _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | + _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); + + if (IS_GEN(dev_priv, 6)) { + /* From the Sandybridge PRM, volume 1 part 3, page 24: + * "If this bit is set, STCunit will have LRA as replacement + * policy. [...] This bit must be reset. LRA replacement + * policy is not supported." + */ + I915_WRITE(CACHE_MODE_0, + _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); + } + + if (IS_GEN_RANGE(dev_priv, 6, 7)) + I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); + + return xcs_resume(engine); +} + +static void cancel_requests(struct intel_engine_cs *engine) +{ + struct i915_request *request; + unsigned long flags; + + spin_lock_irqsave(&engine->active.lock, flags); + + /* Mark all submitted requests as skipped. 
*/ + list_for_each_entry(request, &engine->active.requests, sched.link) { + if (!i915_request_signaled(request)) + dma_fence_set_error(&request->fence, -EIO); + + i915_request_mark_complete(request); + } + + /* Remaining _unready_ requests will be nop'ed when submitted */ + + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +static void i9xx_submit_request(struct i915_request *request) +{ + i915_request_submit(request); + wmb(); /* paranoid flush writes out of the WCB before mmio */ + + ENGINE_WRITE(request->engine, RING_TAIL, + intel_ring_set_tail(request->ring, request->tail)); +} + +static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) +{ + GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + + *cs++ = MI_FLUSH; + + *cs++ = MI_STORE_DWORD_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR; + *cs++ = rq->fence.seqno; + + *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_NOOP; + + rq->tail = intel_ring_offset(rq, cs); + assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; +} + +#define GEN5_WA_STORES 8 /* must be at least 1! */ +static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) +{ + int i; + + GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + + *cs++ = MI_FLUSH; + + BUILD_BUG_ON(GEN5_WA_STORES < 1); + for (i = 0; i < GEN5_WA_STORES; i++) { + *cs++ = MI_STORE_DWORD_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR; + *cs++ = rq->fence.seqno; + } + + *cs++ = MI_USER_INTERRUPT; + + rq->tail = intel_ring_offset(rq, cs); + assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; +} +#undef GEN5_WA_STORES + +static void +gen5_irq_enable(struct intel_engine_cs *engine) +{ + gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask); +} + +static void +gen5_irq_disable(struct intel_engine_cs *engine) +{ + gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask); +} + +static void +i9xx_irq_enable(struct intel_engine_cs *engine) +{ + engine->i915->irq_mask &= ~engine->irq_enable_mask; + intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask); + intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR); +} + +static void +i9xx_irq_disable(struct intel_engine_cs *engine) +{ + engine->i915->irq_mask |= engine->irq_enable_mask; + intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask); +} + +static void +i8xx_irq_enable(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + + i915->irq_mask &= ~engine->irq_enable_mask; + intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask); + ENGINE_POSTING_READ16(engine, RING_IMR); +} + +static void +i8xx_irq_disable(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + + i915->irq_mask |= engine->irq_enable_mask; + intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask); +} + +static int +bsd_ring_flush(struct i915_request *rq, u32 mode) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_FLUSH; + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + return 0; +} + +static void +gen6_irq_enable(struct intel_engine_cs *engine) +{ + ENGINE_WRITE(engine, RING_IMR, + ~(engine->irq_enable_mask | engine->irq_keep_mask)); + + /* Flush/delay to ensure the RING_IMR is active before the GT IMR */ + ENGINE_POSTING_READ(engine, RING_IMR); + 
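+ /* Now unmask the interrupt in the GT IMR as well */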
+ gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask); +} + +static void +gen6_irq_disable(struct intel_engine_cs *engine) +{ + ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); + gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask); +} + +static void +hsw_vebox_irq_enable(struct intel_engine_cs *engine) +{ + ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask); + + /* Flush/delay to ensure the RING_IMR is active before the GT IMR */ + ENGINE_POSTING_READ(engine, RING_IMR); + + gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask); +} + +static void +hsw_vebox_irq_disable(struct intel_engine_cs *engine) +{ + ENGINE_WRITE(engine, RING_IMR, ~0); + gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask); +} + +static int +i965_emit_bb_start(struct i915_request *rq, + u64 offset, u32 length, + unsigned int dispatch_flags) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags & + I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965); + *cs++ = offset; + intel_ring_advance(rq, cs); + + return 0; +} + +/* Just userspace ABI convention to limit the wa batch bo to a resonable size */ +#define I830_BATCH_LIMIT SZ_256K +#define I830_TLB_ENTRIES (2) +#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) +static int +i830_emit_bb_start(struct i915_request *rq, + u64 offset, u32 len, + unsigned int dispatch_flags) +{ + u32 *cs, cs_offset = + intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT); + + GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE); + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* Evict the invalid PTE TLBs */ + *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA; + *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096; + *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */ + *cs++ = cs_offset; + *cs++ = 0xdeadbeef; + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + + if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) { + if (len > I830_BATCH_LIMIT) + return -ENOSPC; + + cs = intel_ring_begin(rq, 6 + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* Blit the batch (which has now all relocs applied) to the + * stable batch scratch bo area (so that the CS never + * stumbles over its tlb invalidation bug) ... + */ + *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2); + *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096; + *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096; + *cs++ = cs_offset; + *cs++ = 4096; + *cs++ = offset; + + *cs++ = MI_FLUSH; + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + + /* ... and execute it. */ + offset = cs_offset; + } + + cs = intel_ring_begin(rq, 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; + *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : + MI_BATCH_NON_SECURE); + intel_ring_advance(rq, cs); + + return 0; +} + +static int +i915_emit_bb_start(struct i915_request *rq, + u64 offset, u32 len, + unsigned int dispatch_flags) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; + *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 
0 : + MI_BATCH_NON_SECURE); + intel_ring_advance(rq, cs); + + return 0; +} + +static void __ring_context_fini(struct intel_context *ce) +{ + i915_vma_put(ce->state); +} + +static void ring_context_destroy(struct kref *ref) +{ + struct intel_context *ce = container_of(ref, typeof(*ce), ref); + + GEM_BUG_ON(intel_context_is_pinned(ce)); + + if (ce->state) + __ring_context_fini(ce); + + intel_context_fini(ce); + intel_context_free(ce); +} + +static struct i915_address_space *vm_alias(struct intel_context *ce) +{ + struct i915_address_space *vm; + + vm = ce->vm; + if (i915_is_ggtt(vm)) + vm = &i915_vm_to_ggtt(vm)->alias->vm; + + return vm; +} + +static int __context_pin_ppgtt(struct intel_context *ce) +{ + struct i915_address_space *vm; + int err = 0; + + vm = vm_alias(ce); + if (vm) + err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm))); + + return err; +} + +static void __context_unpin_ppgtt(struct intel_context *ce) +{ + struct i915_address_space *vm; + + vm = vm_alias(ce); + if (vm) + gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm)); +} + +static void ring_context_unpin(struct intel_context *ce) +{ + __context_unpin_ppgtt(ce); +} + +static struct i915_vma * +alloc_context_vma(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_shmem(i915, engine->context_size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + /* + * Try to make the context utilize L3 as well as LLC. + * + * On VLV we don't have L3 controls in the PTEs so we + * shouldn't touch the cache level, especially as that + * would make the object snooped which might have a + * negative performance impact. + * + * Snooping is required on non-llc platforms in execlist + * mode, but since all GGTT accesses use PAT entry 0 we + * get snooping anyway regardless of cache_level. + * + * This is only applicable for Ivy Bridge devices since + * later platforms don't have L3 control bits in the PTE. 
+ */ + if (IS_IVYBRIDGE(i915)) + i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC); + + if (engine->default_state) { + void *defaults, *vaddr; + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_obj; + } + + defaults = i915_gem_object_pin_map(engine->default_state, + I915_MAP_WB); + if (IS_ERR(defaults)) { + err = PTR_ERR(defaults); + goto err_map; + } + + memcpy(vaddr, defaults, engine->context_size); + i915_gem_object_unpin_map(engine->default_state); + + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + } + + vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + return vma; + +err_map: + i915_gem_object_unpin_map(obj); +err_obj: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static int ring_context_alloc(struct intel_context *ce) +{ + struct intel_engine_cs *engine = ce->engine; + + /* One ringbuffer to rule them all */ + GEM_BUG_ON(!engine->legacy.ring); + ce->ring = engine->legacy.ring; + ce->timeline = intel_timeline_get(engine->legacy.timeline); + + GEM_BUG_ON(ce->state); + if (engine->context_size) { + struct i915_vma *vma; + + vma = alloc_context_vma(engine); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + ce->state = vma; + } + + return 0; +} + +static int ring_context_pin(struct intel_context *ce) +{ + int err; + + err = intel_context_active_acquire(ce); + if (err) + return err; + + err = __context_pin_ppgtt(ce); + if (err) + goto err_active; + + return 0; + +err_active: + intel_context_active_release(ce); + return err; +} + +static void ring_context_reset(struct intel_context *ce) +{ + intel_ring_reset(ce->ring, 0); +} + +static const struct intel_context_ops ring_context_ops = { + .alloc = ring_context_alloc, + + .pin = ring_context_pin, + .unpin = ring_context_unpin, + + .enter = intel_context_enter_engine, + .exit = intel_context_exit_engine, + + .reset = ring_context_reset, + .destroy = ring_context_destroy, +}; + +static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt) +{ + const struct intel_engine_cs * const engine = rq->engine; + u32 *cs; + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base)); + *cs++ = PP_DIR_DCLV_2G; + + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); + *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10; + + intel_ring_advance(rq, cs); + + return 0; +} + +static int flush_pd_dir(struct i915_request *rq) +{ + const struct intel_engine_cs * const engine = rq->engine; + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* Stall until the page table load is complete */ + *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT); + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + return 0; +} + +static inline int mi_set_context(struct i915_request *rq, u32 flags) +{ + struct drm_i915_private *i915 = rq->i915; + struct intel_engine_cs *engine = rq->engine; + enum intel_engine_id id; + const int num_engines = + IS_HASWELL(i915) ? 
RUNTIME_INFO(i915)->num_engines - 1 : 0; + bool force_restore = false; + int len; + u32 *cs; + + flags |= MI_MM_SPACE_GTT; + if (IS_HASWELL(i915)) + /* These flags are for resource streamer on HSW+ */ + flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN; + else + /* We need to save the extended state for powersaving modes */ + flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN; + + len = 4; + if (IS_GEN(i915, 7)) + len += 2 + (num_engines ? 4 * num_engines + 6 : 0); + else if (IS_GEN(i915, 5)) + len += 2; + if (flags & MI_FORCE_RESTORE) { + GEM_BUG_ON(flags & MI_RESTORE_INHIBIT); + flags &= ~MI_FORCE_RESTORE; + force_restore = true; + len += 2; + } + + cs = intel_ring_begin(rq, len); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ + if (IS_GEN(i915, 7)) { + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + if (num_engines) { + struct intel_engine_cs *signaller; + + *cs++ = MI_LOAD_REGISTER_IMM(num_engines); + for_each_engine(signaller, engine->gt, id) { + if (signaller == engine) + continue; + + *cs++ = i915_mmio_reg_offset( + RING_PSMI_CTL(signaller->mmio_base)); + *cs++ = _MASKED_BIT_ENABLE( + GEN6_PSMI_SLEEP_MSG_DISABLE); + } + } + } else if (IS_GEN(i915, 5)) { + /* + * This w/a is only listed for pre-production ilk a/b steppings, + * but is also mentioned for programming the powerctx. To be + * safe, just apply the workaround; we do not use SyncFlush so + * this should never take effect and so be a no-op! + */ + *cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN; + } + + if (force_restore) { + /* + * The HW doesn't handle being told to restore the current + * context very well. Quite often it likes goes to go off and + * sulk, especially when it is meant to be reloading PP_DIR. + * A very simple fix to force the reload is to simply switch + * away from the current context and back again. + * + * Note that the kernel_context will contain random state + * following the INHIBIT_RESTORE. We accept this since we + * never use the kernel_context state; it is merely a + * placeholder we use to flush other contexts. + */ + *cs++ = MI_SET_CONTEXT; + *cs++ = i915_ggtt_offset(engine->kernel_context->state) | + MI_MM_SPACE_GTT | + MI_RESTORE_INHIBIT; + } + + *cs++ = MI_NOOP; + *cs++ = MI_SET_CONTEXT; + *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags; + /* + * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP + * WaMiSetContext_Hang:snb,ivb,vlv + */ + *cs++ = MI_NOOP; + + if (IS_GEN(i915, 7)) { + if (num_engines) { + struct intel_engine_cs *signaller; + i915_reg_t last_reg = {}; /* keep gcc quiet */ + + *cs++ = MI_LOAD_REGISTER_IMM(num_engines); + for_each_engine(signaller, engine->gt, id) { + if (signaller == engine) + continue; + + last_reg = RING_PSMI_CTL(signaller->mmio_base); + *cs++ = i915_mmio_reg_offset(last_reg); + *cs++ = _MASKED_BIT_DISABLE( + GEN6_PSMI_SLEEP_MSG_DISABLE); + } + + /* Insert a delay before the next switch! 
*/ + *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(last_reg); + *cs++ = intel_gt_scratch_offset(engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT); + *cs++ = MI_NOOP; + } + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + } else if (IS_GEN(i915, 5)) { + *cs++ = MI_SUSPEND_FLUSH; + } + + intel_ring_advance(rq, cs); + + return 0; +} + +static int remap_l3_slice(struct i915_request *rq, int slice) +{ + u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice]; + int i; + + if (!remap_info) + return 0; + + cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* + * Note: We do not worry about the concurrent register cacheline hang + * here because no other code should access these registers other than + * at initialization time. + */ + *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4); + for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) { + *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i)); + *cs++ = remap_info[i]; + } + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + + return 0; +} + +static int remap_l3(struct i915_request *rq) +{ + struct i915_gem_context *ctx = rq->gem_context; + int i, err; + + if (!ctx->remap_slice) + return 0; + + for (i = 0; i < MAX_L3_SLICES; i++) { + if (!(ctx->remap_slice & BIT(i))) + continue; + + err = remap_l3_slice(rq, i); + if (err) + return err; + } + + ctx->remap_slice = 0; + return 0; +} + +static int switch_context(struct i915_request *rq) +{ + struct intel_context *ce = rq->hw_context; + struct i915_address_space *vm = vm_alias(ce); + int ret; + + GEM_BUG_ON(HAS_EXECLISTS(rq->i915)); + + if (vm) { + ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm)); + if (ret) + return ret; + } + + if (ce->state) { + u32 hw_flags; + + GEM_BUG_ON(rq->engine->id != RCS0); + + /* + * The kernel context(s) is treated as pure scratch and is not + * expected to retain any state (as we sacrifice it during + * suspend and on resume it may be corrupted). This is ok, + * as nothing actually executes using the kernel context; it + * is purely used for flushing user contexts. + */ + hw_flags = 0; + if (i915_gem_context_is_kernel(rq->gem_context)) + hw_flags = MI_RESTORE_INHIBIT; + + ret = mi_set_context(rq, hw_flags); + if (ret) + return ret; + } + + if (vm) { + struct intel_engine_cs *engine = rq->engine; + + ret = engine->emit_flush(rq, EMIT_INVALIDATE); + if (ret) + return ret; + + ret = flush_pd_dir(rq); + if (ret) + return ret; + + /* + * Not only do we need a full barrier (post-sync write) after + * invalidating the TLBs, but we need to wait a little bit + * longer. Whether this is merely delaying us, or the + * subsequent flush is a key part of serialising with the + * post-sync op, this extra pass appears vital before a + * mm switch! + */ + ret = engine->emit_flush(rq, EMIT_INVALIDATE); + if (ret) + return ret; + + ret = engine->emit_flush(rq, EMIT_FLUSH); + if (ret) + return ret; + } + + ret = remap_l3(rq); + if (ret) + return ret; + + return 0; +} + +static int ring_request_alloc(struct i915_request *request) +{ + int ret; + + GEM_BUG_ON(!intel_context_is_pinned(request->hw_context)); + GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb); + + /* + * Flush enough space to reduce the likelihood of waiting after + * we start building the request - in which case we will just + * have to repeat work. + */ + request->reserved_space += LEGACY_REQUEST_SIZE; + + /* Unconditionally invalidate GPU caches and TLBs. 
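+ * The invalidation is emitted ahead of switch_context() below, so it
+ * is ordered before the MI_SET_CONTEXT and page-directory load for
+ * this request.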
*/ + ret = request->engine->emit_flush(request, EMIT_INVALIDATE); + if (ret) + return ret; + + ret = switch_context(request); + if (ret) + return ret; + + request->reserved_space -= LEGACY_REQUEST_SIZE; + return 0; +} + +static void gen6_bsd_submit_request(struct i915_request *request) +{ + struct intel_uncore *uncore = request->engine->uncore; + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + /* Every tail move must follow the sequence below */ + + /* Disable notification that the ring is IDLE. The GT + * will then assume that it is busy and bring it out of rc6. + */ + intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + + /* Clear the context id. Here be magic! */ + intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0); + + /* Wait for the ring not to be idle, i.e. for it to wake up. */ + if (__intel_wait_for_register_fw(uncore, + GEN6_BSD_SLEEP_PSMI_CONTROL, + GEN6_BSD_SLEEP_INDICATOR, + 0, + 1000, 0, NULL)) + DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); + + /* Now that the ring is fully powered up, update the tail */ + i9xx_submit_request(request); + + /* Let the ring send IDLE messages to the GT again, + * and so let it sleep to conserve power when idle. + */ + intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); +} + +static int mi_flush_dw(struct i915_request *rq, u32 flags) +{ + u32 cmd, *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cmd = MI_FLUSH_DW; + + /* + * We always require a command barrier so that subsequent + * commands, such as breadcrumb interrupts, are strictly ordered + * wrt the contents of the write cache being flushed to memory + * (and thus being coherent from the CPU). + */ + cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + + /* + * Bspec vol 1c.3 - blitter engine command streamer: + * "If ENABLED, all TLBs will be invalidated once the flush + * operation is complete. This bit is only valid when the + * Post-Sync Operation field is a value of 1h or 3h." + */ + cmd |= flags; + + *cs++ = cmd; + *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = 0; + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + + return 0; +} + +static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags) +{ + return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0); +} + +static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode) +{ + return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD); +} + +static int +hsw_emit_bb_start(struct i915_request *rq, + u64 offset, u32 len, + unsigned int dispatch_flags) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ? + 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW); + /* bit0-7 is the length on GEN6+ */ + *cs++ = offset; + intel_ring_advance(rq, cs); + + return 0; +} + +static int +gen6_emit_bb_start(struct i915_request *rq, + u64 offset, u32 len, + unsigned int dispatch_flags) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ? 
+ 0 : MI_BATCH_NON_SECURE_I965); + /* bit0-7 is the length on GEN6+ */ + *cs++ = offset; + intel_ring_advance(rq, cs); + + return 0; +} + +/* Blitter support (SandyBridge+) */ + +static int gen6_ring_flush(struct i915_request *rq, u32 mode) +{ + return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB); +} + +static void i9xx_set_default_submission(struct intel_engine_cs *engine) +{ + engine->submit_request = i9xx_submit_request; + engine->cancel_requests = cancel_requests; + + engine->park = NULL; + engine->unpark = NULL; +} + +static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine) +{ + i9xx_set_default_submission(engine); + engine->submit_request = gen6_bsd_submit_request; +} + +static void ring_destroy(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + WARN_ON(INTEL_GEN(dev_priv) > 2 && + (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); + + intel_engine_cleanup_common(engine); + + intel_ring_unpin(engine->legacy.ring); + intel_ring_put(engine->legacy.ring); + + intel_timeline_unpin(engine->legacy.timeline); + intel_timeline_put(engine->legacy.timeline); + + kfree(engine); +} + +static void setup_irq(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + + if (INTEL_GEN(i915) >= 6) { + engine->irq_enable = gen6_irq_enable; + engine->irq_disable = gen6_irq_disable; + } else if (INTEL_GEN(i915) >= 5) { + engine->irq_enable = gen5_irq_enable; + engine->irq_disable = gen5_irq_disable; + } else if (INTEL_GEN(i915) >= 3) { + engine->irq_enable = i9xx_irq_enable; + engine->irq_disable = i9xx_irq_disable; + } else { + engine->irq_enable = i8xx_irq_enable; + engine->irq_disable = i8xx_irq_disable; + } +} + +static void setup_common(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + + /* gen8+ are only supported with execlists */ + GEM_BUG_ON(INTEL_GEN(i915) >= 8); + + setup_irq(engine); + + engine->destroy = ring_destroy; + + engine->resume = xcs_resume; + engine->reset.prepare = reset_prepare; + engine->reset.reset = reset_ring; + engine->reset.finish = reset_finish; + + engine->cops = &ring_context_ops; + engine->request_alloc = ring_request_alloc; + + /* + * Using a global execution timeline; the previous final breadcrumb is + * equivalent to our next initial bread so we can elide + * engine->emit_init_breadcrumb(). 
+ */ + engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb; + if (IS_GEN(i915, 5)) + engine->emit_fini_breadcrumb = gen5_emit_breadcrumb; + + engine->set_default_submission = i9xx_set_default_submission; + + if (INTEL_GEN(i915) >= 6) + engine->emit_bb_start = gen6_emit_bb_start; + else if (INTEL_GEN(i915) >= 4) + engine->emit_bb_start = i965_emit_bb_start; + else if (IS_I830(i915) || IS_I845G(i915)) + engine->emit_bb_start = i830_emit_bb_start; + else + engine->emit_bb_start = i915_emit_bb_start; +} + +static void setup_rcs(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + + if (HAS_L3_DPF(i915)) + engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; + + if (INTEL_GEN(i915) >= 7) { + engine->emit_flush = gen7_render_ring_flush; + engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb; + } else if (IS_GEN(i915, 6)) { + engine->emit_flush = gen6_render_ring_flush; + engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb; + } else if (IS_GEN(i915, 5)) { + engine->emit_flush = gen4_render_ring_flush; + } else { + if (INTEL_GEN(i915) < 4) + engine->emit_flush = gen2_render_ring_flush; + else + engine->emit_flush = gen4_render_ring_flush; + engine->irq_enable_mask = I915_USER_INTERRUPT; + } + + if (IS_HASWELL(i915)) + engine->emit_bb_start = hsw_emit_bb_start; + + engine->resume = rcs_resume; +} + +static void setup_vcs(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + + if (INTEL_GEN(i915) >= 6) { + /* gen6 bsd needs a special wa for tail updates */ + if (IS_GEN(i915, 6)) + engine->set_default_submission = gen6_bsd_set_default_submission; + engine->emit_flush = gen6_bsd_ring_flush; + engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; + + if (IS_GEN(i915, 6)) + engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb; + else + engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb; + } else { + engine->emit_flush = bsd_ring_flush; + if (IS_GEN(i915, 5)) + engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; + else + engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; + } +} + +static void setup_bcs(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + + engine->emit_flush = gen6_ring_flush; + engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; + + if (IS_GEN(i915, 6)) + engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb; + else + engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb; +} + +static void setup_vecs(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + + GEM_BUG_ON(INTEL_GEN(i915) < 7); + + engine->emit_flush = gen6_ring_flush; + engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; + engine->irq_enable = hsw_vebox_irq_enable; + engine->irq_disable = hsw_vebox_irq_disable; + + engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb; +} + +int intel_ring_submission_setup(struct intel_engine_cs *engine) +{ + setup_common(engine); + + switch (engine->class) { + case RENDER_CLASS: + setup_rcs(engine); + break; + case VIDEO_DECODE_CLASS: + setup_vcs(engine); + break; + case COPY_ENGINE_CLASS: + setup_bcs(engine); + break; + case VIDEO_ENHANCEMENT_CLASS: + setup_vecs(engine); + break; + default: + MISSING_CASE(engine->class); + return -ENODEV; + } + + return 0; +} + +int intel_ring_submission_init(struct intel_engine_cs *engine) +{ + struct intel_timeline *timeline; + struct intel_ring *ring; + int err; + + timeline = intel_timeline_create(engine->gt, engine->status_page.vma); + if 
(IS_ERR(timeline)) { + err = PTR_ERR(timeline); + goto err; + } + GEM_BUG_ON(timeline->has_initial_breadcrumb); + + err = intel_timeline_pin(timeline); + if (err) + goto err_timeline; + + ring = intel_engine_create_ring(engine, SZ_16K); + if (IS_ERR(ring)) { + err = PTR_ERR(ring); + goto err_timeline_unpin; + } + + err = intel_ring_pin(ring); + if (err) + goto err_ring; + + GEM_BUG_ON(engine->legacy.ring); + engine->legacy.ring = ring; + engine->legacy.timeline = timeline; + + err = intel_engine_init_common(engine); + if (err) + goto err_ring_unpin; + + GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma); + + return 0; + +err_ring_unpin: + intel_ring_unpin(ring); +err_ring: + intel_ring_put(ring); +err_timeline_unpin: + intel_timeline_unpin(timeline); +err_timeline: + intel_timeline_put(timeline); +err: + intel_engine_cleanup_common(engine); + return err; +} diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h new file mode 100644 index 000000000000..d9f17f38e0cc --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RING_TYPES_H +#define INTEL_RING_TYPES_H + +#include <linux/atomic.h> +#include <linux/kref.h> +#include <linux/types.h> + +/* + * Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill, + * but keeps the logic simple. Indeed, the whole purpose of this macro is just + * to give some inclination as to some of the magic values used in the various + * workarounds! + */ +#define CACHELINE_BYTES 64 +#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32)) + +struct i915_vma; + +struct intel_ring { + struct kref ref; + struct i915_vma *vma; + void *vaddr; + + /* + * As we have two types of rings, one global to the engine used + * by ringbuffer submission and those that are exclusive to a + * context used by execlists, we have to play safe and allow + * atomic updates to the pin_count. However, the actual pinning + * of the context is either done during initialisation for + * ringbuffer submission or serialised as part of the context + * pinning for execlists, and so we do not need a mutex ourselves + * to serialise intel_ring_pin/intel_ring_unpin. 
+ */ + atomic_t pin_count; + + u32 head; + u32 tail; + u32 emit; + + u32 space; + u32 size; + u32 effective_size; +}; + +#endif /* INTEL_RING_TYPES_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c new file mode 100644 index 000000000000..20d6ee148afc --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -0,0 +1,1872 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_gt.h" +#include "intel_gt_irq.h" +#include "intel_gt_pm_irq.h" +#include "intel_rps.h" +#include "intel_sideband.h" +#include "../../../platform/x86/intel_ips.h" + +/* + * Lock protecting IPS related data structures + */ +static DEFINE_SPINLOCK(mchdev_lock); + +static struct intel_gt *rps_to_gt(struct intel_rps *rps) +{ + return container_of(rps, struct intel_gt, rps); +} + +static struct drm_i915_private *rps_to_i915(struct intel_rps *rps) +{ + return rps_to_gt(rps)->i915; +} + +static struct intel_uncore *rps_to_uncore(struct intel_rps *rps) +{ + return rps_to_gt(rps)->uncore; +} + +static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask) +{ + return mask & ~rps->pm_intrmsk_mbz; +} + +static u32 rps_pm_mask(struct intel_rps *rps, u8 val) +{ + u32 mask = 0; + + /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */ + if (val > rps->min_freq_softlimit) + mask |= (GEN6_PM_RP_UP_EI_EXPIRED | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_DOWN_TIMEOUT); + + if (val < rps->max_freq_softlimit) + mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; + + mask &= rps->pm_events; + + return rps_pm_sanitize_mask(rps, ~mask); +} + +static void rps_reset_ei(struct intel_rps *rps) +{ + memset(&rps->ei, 0, sizeof(rps->ei)); +} + +static void rps_enable_interrupts(struct intel_rps *rps) +{ + struct intel_gt *gt = rps_to_gt(rps); + + rps_reset_ei(rps); + + if (IS_VALLEYVIEW(gt->i915)) + /* WaGsvRC0ResidencyMethod:vlv */ + rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED; + else + rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_DOWN_TIMEOUT); + + spin_lock_irq(>->irq_lock); + gen6_gt_pm_enable_irq(gt, rps->pm_events); + spin_unlock_irq(>->irq_lock); + + intel_uncore_write(gt->uncore, GEN6_PMINTRMSK, + rps_pm_mask(rps, rps->cur_freq)); +} + +static void gen6_rps_reset_interrupts(struct intel_rps *rps) +{ + gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS); +} + +static void gen11_rps_reset_interrupts(struct intel_rps *rps) +{ + while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM)) + ; +} + +static void rps_reset_interrupts(struct intel_rps *rps) +{ + struct intel_gt *gt = rps_to_gt(rps); + + spin_lock_irq(>->irq_lock); + if (INTEL_GEN(gt->i915) >= 11) + gen11_rps_reset_interrupts(rps); + else + gen6_rps_reset_interrupts(rps); + + rps->pm_iir = 0; + spin_unlock_irq(>->irq_lock); +} + +static void rps_disable_interrupts(struct intel_rps *rps) +{ + struct intel_gt *gt = rps_to_gt(rps); + + rps->pm_events = 0; + + intel_uncore_write(gt->uncore, GEN6_PMINTRMSK, + rps_pm_sanitize_mask(rps, ~0u)); + + spin_lock_irq(>->irq_lock); + gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); + spin_unlock_irq(>->irq_lock); + + intel_synchronize_irq(gt->i915); + + /* + * Now that we will not be generating any more work, flush any + * outstanding tasks. As we are called on the RPS idle path, + * we will reset the GPU to minimum frequencies, so the current + * state of the worker can be discarded. 
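+ * With rps->pm_events cleared and the PM interrupts masked and
+ * synchronized above, the interrupt path can no longer queue new
+ * work.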
+ */ + cancel_work_sync(&rps->work); + + rps_reset_interrupts(rps); +} + +static const struct cparams { + u16 i; + u16 t; + u16 m; + u16 c; +} cparams[] = { + { 1, 1333, 301, 28664 }, + { 1, 1066, 294, 24460 }, + { 1, 800, 294, 25192 }, + { 0, 1333, 276, 27605 }, + { 0, 1066, 276, 27605 }, + { 0, 800, 231, 23784 }, +}; + +static void gen5_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); + u8 fmax, fmin, fstart; + u32 rgvmodectl; + int c_m, i; + + if (i915->fsb_freq <= 3200) + c_m = 0; + else if (i915->fsb_freq <= 4800) + c_m = 1; + else + c_m = 2; + + for (i = 0; i < ARRAY_SIZE(cparams); i++) { + if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) { + rps->ips.m = cparams[i].m; + rps->ips.c = cparams[i].c; + break; + } + } + + rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); + + /* Set up min, max, and cur for interrupt handling */ + fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; + fmin = (rgvmodectl & MEMMODE_FMIN_MASK); + fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> + MEMMODE_FSTART_SHIFT; + DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", + fmax, fmin, fstart); + + rps->min_freq = fmax; + rps->max_freq = fmin; + + rps->idle_freq = rps->min_freq; + rps->cur_freq = rps->idle_freq; +} + +static unsigned long +__ips_chipset_val(struct intel_ips *ips) +{ + struct intel_uncore *uncore = + rps_to_uncore(container_of(ips, struct intel_rps, ips)); + unsigned long now = jiffies_to_msecs(jiffies), dt; + unsigned long result; + u64 total, delta; + + lockdep_assert_held(&mchdev_lock); + + /* + * Prevent division-by-zero if we are asking too fast. + * Also, we don't get interesting results if we are polling + * faster than once in 10ms, so just return the saved value + * in such cases. + */ + dt = now - ips->last_time1; + if (dt <= 10) + return ips->chipset_power; + + /* FIXME: handle per-counter overflow */ + total = intel_uncore_read(uncore, DMIEC); + total += intel_uncore_read(uncore, DDREC); + total += intel_uncore_read(uncore, CSIEC); + + delta = total - ips->last_count1; + + result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10); + + ips->last_count1 = total; + ips->last_time1 = now; + + ips->chipset_power = result; + + return result; +} + +static unsigned long ips_mch_val(struct intel_uncore *uncore) +{ + unsigned int m, x, b; + u32 tsfs; + + tsfs = intel_uncore_read(uncore, TSFS); + x = intel_uncore_read8(uncore, TR1); + + b = tsfs & TSFS_INTR_MASK; + m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT; + + return m * x / 127 - b; +} + +static int _pxvid_to_vd(u8 pxvid) +{ + if (pxvid == 0) + return 0; + + if (pxvid >= 8 && pxvid < 31) + pxvid = 31; + + return (pxvid + 2) * 125; +} + +static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid) +{ + const int vd = _pxvid_to_vd(pxvid); + + if (INTEL_INFO(i915)->is_mobile) + return max(vd - 1125, 0); + + return vd; +} + +static void __gen5_ips_update(struct intel_ips *ips) +{ + struct intel_uncore *uncore = + rps_to_uncore(container_of(ips, struct intel_rps, ips)); + u64 now, delta, dt; + u32 count; + + lockdep_assert_held(&mchdev_lock); + + now = ktime_get_raw_ns(); + dt = now - ips->last_time2; + do_div(dt, NSEC_PER_MSEC); + + /* Don't divide by 0 */ + if (dt <= 10) + return; + + count = intel_uncore_read(uncore, GFXEC); + delta = count - ips->last_count2; + + ips->last_count2 = count; + ips->last_time2 = now; + + /* More magic constants... 
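+ * The update below works out to roughly gfx_power = delta * 118.1 / dt,
+ * i.e. the GFXEC event rate scaled into the units consumed by
+ * __ips_gfx_val().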
*/ + ips->gfx_power = div_u64(delta * 1181, dt * 10); +} + +static void gen5_rps_update(struct intel_rps *rps) +{ + spin_lock_irq(&mchdev_lock); + __gen5_ips_update(&rps->ips); + spin_unlock_irq(&mchdev_lock); +} + +static bool gen5_rps_set(struct intel_rps *rps, u8 val) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u16 rgvswctl; + + lockdep_assert_held(&mchdev_lock); + + rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); + if (rgvswctl & MEMCTL_CMD_STS) { + DRM_DEBUG("gpu busy, RCS change rejected\n"); + return false; /* still busy with another command */ + } + + /* Invert the frequency bin into an ips delay */ + val = rps->max_freq - val; + val = rps->min_freq + val; + + rgvswctl = + (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | + (val << MEMCTL_FREQ_SHIFT) | + MEMCTL_SFCAVM; + intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); + intel_uncore_posting_read16(uncore, MEMSWCTL); + + rgvswctl |= MEMCTL_CMD_STS; + intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); + + return true; +} + +static unsigned long intel_pxfreq(u32 vidfreq) +{ + int div = (vidfreq & 0x3f0000) >> 16; + int post = (vidfreq & 0x3000) >> 12; + int pre = (vidfreq & 0x7); + + if (!pre) + return 0; + + return div * 133333 / (pre << post); +} + +static unsigned int init_emon(struct intel_uncore *uncore) +{ + u8 pxw[16]; + int i; + + /* Disable to program */ + intel_uncore_write(uncore, ECR, 0); + intel_uncore_posting_read(uncore, ECR); + + /* Program energy weights for various events */ + intel_uncore_write(uncore, SDEW, 0x15040d00); + intel_uncore_write(uncore, CSIEW0, 0x007f0000); + intel_uncore_write(uncore, CSIEW1, 0x1e220004); + intel_uncore_write(uncore, CSIEW2, 0x04000004); + + for (i = 0; i < 5; i++) + intel_uncore_write(uncore, PEW(i), 0); + for (i = 0; i < 3; i++) + intel_uncore_write(uncore, DEW(i), 0); + + /* Program P-state weights to account for frequency power adjustment */ + for (i = 0; i < 16; i++) { + u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i)); + unsigned int freq = intel_pxfreq(pxvidfreq); + unsigned int vid = + (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; + unsigned int val; + + val = vid * vid * freq / 1000 * 255; + val /= 127 * 127 * 900; + + pxw[i] = val; + } + /* Render standby states get 0 weight */ + pxw[14] = 0; + pxw[15] = 0; + + for (i = 0; i < 4; i++) { + intel_uncore_write(uncore, PXW(i), + pxw[i * 4 + 0] << 24 | + pxw[i * 4 + 1] << 16 | + pxw[i * 4 + 2] << 8 | + pxw[i * 4 + 3] << 0); + } + + /* Adjust magic regs to magic values (more experimental results) */ + intel_uncore_write(uncore, OGW0, 0); + intel_uncore_write(uncore, OGW1, 0); + intel_uncore_write(uncore, EG0, 0x00007f00); + intel_uncore_write(uncore, EG1, 0x0000000e); + intel_uncore_write(uncore, EG2, 0x000e0000); + intel_uncore_write(uncore, EG3, 0x68000300); + intel_uncore_write(uncore, EG4, 0x42000000); + intel_uncore_write(uncore, EG5, 0x00140031); + intel_uncore_write(uncore, EG6, 0); + intel_uncore_write(uncore, EG7, 0); + + for (i = 0; i < 8; i++) + intel_uncore_write(uncore, PXWL(i), 0); + + /* Enable PMON + select events */ + intel_uncore_write(uncore, ECR, 0x80000019); + + return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK; +} + +static bool gen5_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u8 fstart, vstart; + u32 rgvmodectl; + + spin_lock_irq(&mchdev_lock); + + rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); + + /* Enable temp reporting */ + intel_uncore_write16(uncore, PMMISC, + intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN); + 
intel_uncore_write16(uncore, TSC1, + intel_uncore_read16(uncore, TSC1) | TSE); + + /* 100ms RC evaluation intervals */ + intel_uncore_write(uncore, RCUPEI, 100000); + intel_uncore_write(uncore, RCDNEI, 100000); + + /* Set max/min thresholds to 90ms and 80ms respectively */ + intel_uncore_write(uncore, RCBMAXAVG, 90000); + intel_uncore_write(uncore, RCBMINAVG, 80000); + + intel_uncore_write(uncore, MEMIHYST, 1); + + /* Set up min, max, and cur for interrupt handling */ + fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> + MEMMODE_FSTART_SHIFT; + + vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) & + PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; + + intel_uncore_write(uncore, + MEMINTREN, + MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); + + intel_uncore_write(uncore, VIDSTART, vstart); + intel_uncore_posting_read(uncore, VIDSTART); + + rgvmodectl |= MEMMODE_SWMODE_EN; + intel_uncore_write(uncore, MEMMODECTL, rgvmodectl); + + if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) & + MEMCTL_CMD_STS) == 0, 10)) + DRM_ERROR("stuck trying to change perf mode\n"); + mdelay(1); + + gen5_rps_set(rps, rps->cur_freq); + + rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC); + rps->ips.last_count1 += intel_uncore_read(uncore, DDREC); + rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC); + rps->ips.last_time1 = jiffies_to_msecs(jiffies); + + rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC); + rps->ips.last_time2 = ktime_get_raw_ns(); + + spin_unlock_irq(&mchdev_lock); + + rps->ips.corr = init_emon(uncore); + + return true; +} + +static void gen5_rps_disable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u16 rgvswctl; + + spin_lock_irq(&mchdev_lock); + + rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); + + /* Ack interrupts, disable EFC interrupt */ + intel_uncore_write(uncore, MEMINTREN, + intel_uncore_read(uncore, MEMINTREN) & + ~MEMINT_EVAL_CHG_EN); + intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); + intel_uncore_write(uncore, DEIER, + intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT); + intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT); + intel_uncore_write(uncore, DEIMR, + intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT); + + /* Go back to the starting frequency */ + gen5_rps_set(rps, rps->idle_freq); + mdelay(1); + rgvswctl |= MEMCTL_CMD_STS; + intel_uncore_write(uncore, MEMSWCTL, rgvswctl); + mdelay(1); + + spin_unlock_irq(&mchdev_lock); +} + +static u32 rps_limits(struct intel_rps *rps, u8 val) +{ + u32 limits; + + /* + * Only set the down limit when we've reached the lowest level to avoid + * getting more interrupts, otherwise leave this clear. This prevents a + * race in the hw when coming out of rc6: There's a tiny window where + * the hw runs at the minimal clock before selecting the desired + * frequency, if the down threshold expires in that window we will not + * receive a down interrupt. 
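+ * The up limit, by contrast, is always programmed from the max
+ * softlimit below, regardless of the requested frequency.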
+ */ + if (INTEL_GEN(rps_to_i915(rps)) >= 9) { + limits = rps->max_freq_softlimit << 23; + if (val <= rps->min_freq_softlimit) + limits |= rps->min_freq_softlimit << 14; + } else { + limits = rps->max_freq_softlimit << 24; + if (val <= rps->min_freq_softlimit) + limits |= rps->min_freq_softlimit << 16; + } + + return limits; +} + +static void rps_set_power(struct intel_rps *rps, int new_power) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 threshold_up = 0, threshold_down = 0; /* in % */ + u32 ei_up = 0, ei_down = 0; + + lockdep_assert_held(&rps->power.mutex); + + if (new_power == rps->power.mode) + return; + + /* Note the units here are not exactly 1us, but 1280ns. */ + switch (new_power) { + case LOW_POWER: + /* Upclock if more than 95% busy over 16ms */ + ei_up = 16000; + threshold_up = 95; + + /* Downclock if less than 85% busy over 32ms */ + ei_down = 32000; + threshold_down = 85; + break; + + case BETWEEN: + /* Upclock if more than 90% busy over 13ms */ + ei_up = 13000; + threshold_up = 90; + + /* Downclock if less than 75% busy over 32ms */ + ei_down = 32000; + threshold_down = 75; + break; + + case HIGH_POWER: + /* Upclock if more than 85% busy over 10ms */ + ei_up = 10000; + threshold_up = 85; + + /* Downclock if less than 60% busy over 32ms */ + ei_down = 32000; + threshold_down = 60; + break; + } + + /* When byt can survive without system hang with dynamic + * sw freq adjustments, this restriction can be lifted. + */ + if (IS_VALLEYVIEW(i915)) + goto skip_hw_write; + + intel_uncore_write(uncore, GEN6_RP_UP_EI, + GT_INTERVAL_FROM_US(i915, ei_up)); + intel_uncore_write(uncore, GEN6_RP_UP_THRESHOLD, + GT_INTERVAL_FROM_US(i915, + ei_up * threshold_up / 100)); + + intel_uncore_write(uncore, GEN6_RP_DOWN_EI, + GT_INTERVAL_FROM_US(i915, ei_down)); + intel_uncore_write(uncore, GEN6_RP_DOWN_THRESHOLD, + GT_INTERVAL_FROM_US(i915, + ei_down * threshold_down / 100)); + + intel_uncore_write(uncore, GEN6_RP_CONTROL, + (INTEL_GEN(i915) > 9 ? 
0 : GEN6_RP_MEDIA_TURBO) | + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_AVG); + +skip_hw_write: + rps->power.mode = new_power; + rps->power.up_threshold = threshold_up; + rps->power.down_threshold = threshold_down; +} + +static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val) +{ + int new_power; + + new_power = rps->power.mode; + switch (rps->power.mode) { + case LOW_POWER: + if (val > rps->efficient_freq + 1 && + val > rps->cur_freq) + new_power = BETWEEN; + break; + + case BETWEEN: + if (val <= rps->efficient_freq && + val < rps->cur_freq) + new_power = LOW_POWER; + else if (val >= rps->rp0_freq && + val > rps->cur_freq) + new_power = HIGH_POWER; + break; + + case HIGH_POWER: + if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 && + val < rps->cur_freq) + new_power = BETWEEN; + break; + } + /* Max/min bins are special */ + if (val <= rps->min_freq_softlimit) + new_power = LOW_POWER; + if (val >= rps->max_freq_softlimit) + new_power = HIGH_POWER; + + mutex_lock(&rps->power.mutex); + if (rps->power.interactive) + new_power = HIGH_POWER; + rps_set_power(rps, new_power); + mutex_unlock(&rps->power.mutex); +} + +void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive) +{ + mutex_lock(&rps->power.mutex); + if (interactive) { + if (!rps->power.interactive++ && rps->active) + rps_set_power(rps, HIGH_POWER); + } else { + GEM_BUG_ON(!rps->power.interactive); + rps->power.interactive--; + } + mutex_unlock(&rps->power.mutex); +} + +static int gen6_rps_set(struct intel_rps *rps, u8 val) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 swreq; + + if (INTEL_GEN(i915) >= 9) + swreq = GEN9_FREQUENCY(val); + else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) + swreq = HSW_FREQUENCY(val); + else + swreq = (GEN6_FREQUENCY(val) | + GEN6_OFFSET(0) | + GEN6_AGGRESSIVE_TURBO); + intel_uncore_write(uncore, GEN6_RPNSWREQ, swreq); + + return 0; +} + +static int vlv_rps_set(struct intel_rps *rps, u8 val) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + int err; + + vlv_punit_get(i915); + err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val); + vlv_punit_put(i915); + + return err; +} + +static int rps_set(struct intel_rps *rps, u8 val) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + int err; + + if (INTEL_GEN(i915) < 6) + return 0; + + if (val == rps->last_freq) + return 0; + + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + err = vlv_rps_set(rps, val); + else + err = gen6_rps_set(rps, val); + if (err) + return err; + + gen6_rps_set_thresholds(rps, val); + rps->last_freq = val; + + return 0; +} + +void intel_rps_unpark(struct intel_rps *rps) +{ + u8 freq; + + if (!rps->enabled) + return; + + /* + * Use the user's desired frequency as a guide, but for better + * performance, jump directly to RPe as our starting frequency. 
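+ * (RPe, rps->efficient_freq, is the operating point firmware reports
+ * as the most energy-efficient, so it is a cheap place to start the
+ * ramp compared to RP0.)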
+ */
+ mutex_lock(&rps->lock);
+ rps->active = true;
+ freq = max(rps->cur_freq, rps->efficient_freq);
+ freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
+ intel_rps_set(rps, freq);
+ rps->last_adj = 0;
+ mutex_unlock(&rps->lock);
+
+ if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+ rps_enable_interrupts(rps);
+
+ if (IS_GEN(rps_to_i915(rps), 5))
+ gen5_rps_update(rps);
+}
+
+void intel_rps_park(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (!rps->enabled)
+ return;
+
+ if (INTEL_GEN(i915) >= 6)
+ rps_disable_interrupts(rps);
+
+ rps->active = false;
+ if (rps->last_freq <= rps->idle_freq)
+ return;
+
+ /*
+ * The punit delays the write of the frequency and voltage until it
+ * determines the GPU is awake. During normal usage we don't want to
+ * waste power changing the frequency if the GPU is sleeping (rc6).
+ * However, the GPU and driver are now idle and we do not want to delay
+ * switching to minimum voltage (reducing power whilst idle) as we do
+ * not expect to be woken in the near future and so must flush the
+ * change by waking the device.
+ *
+ * We choose to take the media powerwell (either would do to trick the
+ * punit into committing the voltage change) as that takes a lot less
+ * power than the render powerwell.
+ */
+ intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+ rps_set(rps, rps->idle_freq);
+ intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+}
+
+void intel_rps_boost(struct i915_request *rq)
+{
+ struct intel_rps *rps = &rq->engine->gt->rps;
+ unsigned long flags;
+
+ if (i915_request_signaled(rq) || !rps->active)
+ return;
+
+ /* Serializes with i915_request_retire() */
+ spin_lock_irqsave(&rq->lock, flags);
+ if (!i915_request_has_waitboost(rq) &&
+ !dma_fence_is_signaled_locked(&rq->fence)) {
+ rq->flags |= I915_REQUEST_WAITBOOST;
+
+ if (!atomic_fetch_inc(&rps->num_waiters) &&
+ READ_ONCE(rps->cur_freq) < rps->boost_freq)
+ schedule_work(&rps->work);
+
+ atomic_inc(&rps->boosts);
+ }
+ spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+int intel_rps_set(struct intel_rps *rps, u8 val)
+{
+ int err = 0;
+
+ lockdep_assert_held(&rps->lock);
+ GEM_BUG_ON(val > rps->max_freq);
+ GEM_BUG_ON(val < rps->min_freq);
+
+ if (rps->active) {
+ err = rps_set(rps, val);
+
+ /*
+ * Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
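+ * Both the interrupt limits and the PM interrupt mask depend on
+ * the current frequency, so they are reprogrammed below on every
+ * change.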
+ */ + if (INTEL_GEN(rps_to_i915(rps)) >= 6) { + struct intel_uncore *uncore = rps_to_uncore(rps); + + intel_uncore_write(uncore, GEN6_RP_INTERRUPT_LIMITS, + rps_limits(rps, val)); + + intel_uncore_write(uncore, GEN6_PMINTRMSK, + rps_pm_mask(rps, val)); + } + } + + if (err == 0) + rps->cur_freq = val; + + return err; +} + +static void gen6_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); + + /* All of these values are in units of 50MHz */ + + /* static values from HW: RP0 > RP1 > RPn (min_freq) */ + if (IS_GEN9_LP(i915)) { + u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP); + + rps->rp0_freq = (rp_state_cap >> 16) & 0xff; + rps->rp1_freq = (rp_state_cap >> 8) & 0xff; + rps->min_freq = (rp_state_cap >> 0) & 0xff; + } else { + u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP); + + rps->rp0_freq = (rp_state_cap >> 0) & 0xff; + rps->rp1_freq = (rp_state_cap >> 8) & 0xff; + rps->min_freq = (rp_state_cap >> 16) & 0xff; + } + + /* hw_max = RP0 until we check for overclocking */ + rps->max_freq = rps->rp0_freq; + + rps->efficient_freq = rps->rp1_freq; + if (IS_HASWELL(i915) || IS_BROADWELL(i915) || + IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) { + u32 ddcc_status = 0; + + if (sandybridge_pcode_read(i915, + HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, + &ddcc_status, NULL) == 0) + rps->efficient_freq = + clamp_t(u8, + (ddcc_status >> 8) & 0xff, + rps->min_freq, + rps->max_freq); + } + + if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) { + /* Store the frequency values in 16.66 MHZ units, which is + * the natural hardware unit for SKL + */ + rps->rp0_freq *= GEN9_FREQ_SCALER; + rps->rp1_freq *= GEN9_FREQ_SCALER; + rps->min_freq *= GEN9_FREQ_SCALER; + rps->max_freq *= GEN9_FREQ_SCALER; + rps->efficient_freq *= GEN9_FREQ_SCALER; + } +} + +static bool rps_reset(struct intel_rps *rps) +{ + /* force a reset */ + rps->power.mode = -1; + rps->last_freq = -1; + + if (rps_set(rps, rps->min_freq)) { + DRM_ERROR("Failed to reset RPS to initial values\n"); + return false; + } + + rps->cur_freq = rps->min_freq; + return true; +} + +/* See the Gen9_GT_PM_Programming_Guide doc for the below */ +static bool gen9_rps_enable(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); + + /* Program defaults and thresholds for RPS */ + if (IS_GEN(i915, 9)) + intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, + GEN9_FREQUENCY(rps->rp1_freq)); + + /* 1 second timeout */ + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, + GT_INTERVAL_FROM_US(i915, 1000000)); + + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa); + + return rps_reset(rps); +} + +static bool gen8_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + + intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, + HSW_FREQUENCY(rps->rp1_freq)); + + /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, + 100000000 / 128); /* 1 second timeout */ + + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + + return rps_reset(rps); +} + +static bool gen6_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + + /* Power down if completely idle for over 50ms */ + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000); + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + + return rps_reset(rps); +} + +static int chv_rps_max_freq(struct 
intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); + + switch (RUNTIME_INFO(i915)->sseu.eu_total) { + case 8: + /* (2 * 4) config */ + val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT; + break; + case 12: + /* (2 * 6) config */ + val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT; + break; + case 16: + /* (2 * 8) config */ + default: + /* Setting (2 * 8) Min RP0 for any other combination */ + val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT; + break; + } + + return val & FB_GFX_FREQ_FUSE_MASK; +} + +static int chv_rps_rpe_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG); + val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT; + + return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; +} + +static int chv_rps_guar_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); + + return val & FB_GFX_FREQ_FUSE_MASK; +} + +static u32 chv_rps_min_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE); + val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT; + + return val & FB_GFX_FREQ_FUSE_MASK; +} + +static bool chv_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + /* 1: Program defaults and thresholds for RPS*/ + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); + intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); + intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); + + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + + /* 2: Enable RPS */ + intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_AVG); + + /* Setting Fixed Bias */ + vlv_punit_get(i915); + + val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50; + vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); + + val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); + + vlv_punit_put(i915); + + /* RPS code assumes GPLL is used */ + WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); + + DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); + DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); + + return rps_reset(rps); +} + +static int vlv_rps_guar_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val, rp1; + + val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); + + rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK; + rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; + + return rp1; +} + +static int vlv_rps_max_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val, rp0; + + val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); + + rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; + /* Clamp to max */ + rp0 = min_t(u32, rp0, 0xea); + + return rp0; +} + +static int vlv_rps_rpe_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val, rpe; + + val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO); + rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; + val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI); + rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; + + return rpe; +} + +static int vlv_rps_min_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff; + /* + * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value + * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on + * a BYT-M B0 the above register contains 0xbf. Moreover when setting + * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0 + * to make sure it matches what Punit accepts. + */ + return max_t(u32, val, 0xc0); +} + +static bool vlv_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); + intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); + intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); + + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + + intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, + GEN6_RP_MEDIA_TURBO | + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_CONT); + + vlv_punit_get(i915); + + /* Setting Fixed Bias */ + val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875; + vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); + + val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); + + vlv_punit_put(i915); + + /* RPS code assumes GPLL is used */ + WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); + + DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); + DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); + + return rps_reset(rps); +} + +static unsigned long __ips_gfx_val(struct intel_ips *ips) +{ + struct intel_rps *rps = container_of(ips, typeof(*rps), ips); + struct intel_uncore *uncore = rps_to_uncore(rps); + unsigned long t, corr, state1, corr2, state2; + u32 pxvid, ext_v; + + lockdep_assert_held(&mchdev_lock); + + pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq)); + pxvid = (pxvid >> 24) & 0x7f; + ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid); + + state1 = ext_v; + + /* Revel in the empirically derived constants */ + + /* Correction factor in 1/100000 units */ + t = ips_mch_val(uncore); + if (t > 80) + corr = t * 2349 + 135940; + else if (t >= 50) + corr = t * 964 + 29317; + else /* < 50 */ + corr = t * 301 + 1004; + + corr = corr * 150142 * state1 / 10000 - 78642; + corr /= 100000; + corr2 = corr * ips->corr; + + state2 = corr2 * state1 / 10000; + state2 /= 100; /* convert to mW */ + + __gen5_ips_update(ips); + + return ips->gfx_power + state2; +} + +void intel_rps_enable(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + if (IS_CHERRYVIEW(i915)) + rps->enabled = chv_rps_enable(rps); + else if (IS_VALLEYVIEW(i915)) + rps->enabled = vlv_rps_enable(rps); + else if (INTEL_GEN(i915) >= 9) + rps->enabled = gen9_rps_enable(rps); + else if (INTEL_GEN(i915) >= 8) + rps->enabled = gen8_rps_enable(rps); + else if (INTEL_GEN(i915) >= 6) + rps->enabled = gen6_rps_enable(rps); + else if (IS_IRONLAKE_M(i915)) + rps->enabled = gen5_rps_enable(rps); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + if (!rps->enabled) + return; + + WARN_ON(rps->max_freq < rps->min_freq); + WARN_ON(rps->idle_freq > rps->max_freq); + + WARN_ON(rps->efficient_freq < rps->min_freq); + WARN_ON(rps->efficient_freq > rps->max_freq); +} + +static void gen6_rps_disable(struct intel_rps *rps) +{ + intel_uncore_write(rps_to_uncore(rps), GEN6_RP_CONTROL, 0); +} + +void intel_rps_disable(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + rps->enabled = false; + + if (INTEL_GEN(i915) >= 6) + gen6_rps_disable(rps); + else if (IS_IRONLAKE_M(i915)) + gen5_rps_disable(rps); +} + +static int byt_gpu_freq(struct intel_rps *rps, int val) +{ + /* + * N = val - 0xb7 + * Slow = Fast = GPLL ref * N + */ + return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000); +} + +static int byt_freq_opcode(struct intel_rps *rps, int val) +{ + return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7; +} + +static int chv_gpu_freq(struct intel_rps *rps, int val) +{ + /* + * N = val / 2 + * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2 + */ + return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000); +} + +static int chv_freq_opcode(struct intel_rps *rps, int val) +{ + /* CHV needs even values */ + return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2; +} + +int intel_gpu_freq(struct intel_rps *rps, int val) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + if (INTEL_GEN(i915) >= 9) + return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, + GEN9_FREQ_SCALER); + else if (IS_CHERRYVIEW(i915)) + return chv_gpu_freq(rps, val); + else if (IS_VALLEYVIEW(i915)) + return byt_gpu_freq(rps, val); + else + return val * GT_FREQUENCY_MULTIPLIER; +} + +int intel_freq_opcode(struct intel_rps *rps, int val) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + if 
(INTEL_GEN(i915) >= 9) + return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, + GT_FREQUENCY_MULTIPLIER); + else if (IS_CHERRYVIEW(i915)) + return chv_freq_opcode(rps, val); + else if (IS_VALLEYVIEW(i915)) + return byt_freq_opcode(rps, val); + else + return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); +} + +static void vlv_init_gpll_ref_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + rps->gpll_ref_freq = + vlv_get_cck_clock(i915, "GPLL ref", + CCK_GPLL_CLOCK_CONTROL, + i915->czclk_freq); + + DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq); +} + +static void vlv_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + vlv_iosf_sb_get(i915, + BIT(VLV_IOSF_SB_PUNIT) | + BIT(VLV_IOSF_SB_NC) | + BIT(VLV_IOSF_SB_CCK)); + + vlv_init_gpll_ref_freq(rps); + + val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); + switch ((val >> 6) & 3) { + case 0: + case 1: + i915->mem_freq = 800; + break; + case 2: + i915->mem_freq = 1066; + break; + case 3: + i915->mem_freq = 1333; + break; + } + DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq); + + rps->max_freq = vlv_rps_max_freq(rps); + rps->rp0_freq = rps->max_freq; + DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->max_freq), + rps->max_freq); + + rps->efficient_freq = vlv_rps_rpe_freq(rps); + DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->efficient_freq), + rps->efficient_freq); + + rps->rp1_freq = vlv_rps_guar_freq(rps); + DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->rp1_freq), + rps->rp1_freq); + + rps->min_freq = vlv_rps_min_freq(rps); + DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->min_freq), + rps->min_freq); + + vlv_iosf_sb_put(i915, + BIT(VLV_IOSF_SB_PUNIT) | + BIT(VLV_IOSF_SB_NC) | + BIT(VLV_IOSF_SB_CCK)); +} + +static void chv_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + vlv_iosf_sb_get(i915, + BIT(VLV_IOSF_SB_PUNIT) | + BIT(VLV_IOSF_SB_NC) | + BIT(VLV_IOSF_SB_CCK)); + + vlv_init_gpll_ref_freq(rps); + + val = vlv_cck_read(i915, CCK_FUSE_REG); + + switch ((val >> 2) & 0x7) { + case 3: + i915->mem_freq = 2000; + break; + default: + i915->mem_freq = 1600; + break; + } + DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq); + + rps->max_freq = chv_rps_max_freq(rps); + rps->rp0_freq = rps->max_freq; + DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->max_freq), + rps->max_freq); + + rps->efficient_freq = chv_rps_rpe_freq(rps); + DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->efficient_freq), + rps->efficient_freq); + + rps->rp1_freq = chv_rps_guar_freq(rps); + DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->rp1_freq), + rps->rp1_freq); + + rps->min_freq = chv_rps_min_freq(rps); + DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->min_freq), + rps->min_freq); + + vlv_iosf_sb_put(i915, + BIT(VLV_IOSF_SB_PUNIT) | + BIT(VLV_IOSF_SB_NC) | + BIT(VLV_IOSF_SB_CCK)); + + WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq | + rps->min_freq) & 1, + "Odd GPU freq values\n"); +} + +static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei) +{ + ei->ktime = ktime_get_raw(); + ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT); + ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT); +} + +static u32 vlv_wa_c0_ei(struct 
intel_rps *rps, u32 pm_iir) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + const struct intel_rps_ei *prev = &rps->ei; + struct intel_rps_ei now; + u32 events = 0; + + if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) + return 0; + + vlv_c0_read(uncore, &now); + + if (prev->ktime) { + u64 time, c0; + u32 render, media; + + time = ktime_us_delta(now.ktime, prev->ktime); + + time *= rps_to_i915(rps)->czclk_freq; + + /* Workload can be split between render + media, + * e.g. SwapBuffers being blitted in X after being rendered in + * mesa. To account for this we need to combine both engines + * into our activity counter. + */ + render = now.render_c0 - prev->render_c0; + media = now.media_c0 - prev->media_c0; + c0 = max(render, media); + c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ + + if (c0 > time * rps->power.up_threshold) + events = GEN6_PM_RP_UP_THRESHOLD; + else if (c0 < time * rps->power.down_threshold) + events = GEN6_PM_RP_DOWN_THRESHOLD; + } + + rps->ei = now; + return events; +} + +static void rps_work(struct work_struct *work) +{ + struct intel_rps *rps = container_of(work, typeof(*rps), work); + struct intel_gt *gt = rps_to_gt(rps); + bool client_boost = false; + int new_freq, adj, min, max; + u32 pm_iir = 0; + + spin_lock_irq(>->irq_lock); + pm_iir = fetch_and_zero(&rps->pm_iir); + client_boost = atomic_read(&rps->num_waiters); + spin_unlock_irq(>->irq_lock); + + /* Make sure we didn't queue anything we're not going to process. */ + if ((pm_iir & rps->pm_events) == 0 && !client_boost) + goto out; + + mutex_lock(&rps->lock); + + pm_iir |= vlv_wa_c0_ei(rps, pm_iir); + + adj = rps->last_adj; + new_freq = rps->cur_freq; + min = rps->min_freq_softlimit; + max = rps->max_freq_softlimit; + if (client_boost) + max = rps->max_freq; + if (client_boost && new_freq < rps->boost_freq) { + new_freq = rps->boost_freq; + adj = 0; + } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { + if (adj > 0) + adj *= 2; + else /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1; + + if (new_freq >= rps->max_freq_softlimit) + adj = 0; + } else if (client_boost) { + adj = 0; + } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { + if (rps->cur_freq > rps->efficient_freq) + new_freq = rps->efficient_freq; + else if (rps->cur_freq > rps->min_freq_softlimit) + new_freq = rps->min_freq_softlimit; + adj = 0; + } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { + if (adj < 0) + adj *= 2; + else /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1; + + if (new_freq <= rps->min_freq_softlimit) + adj = 0; + } else { /* unknown event */ + adj = 0; + } + + rps->last_adj = adj; + + /* + * Limit deboosting and boosting to keep ourselves at the extremes + * when in the respective power modes (i.e. slowly decrease frequencies + * while in the HIGH_POWER zone and slowly increase frequencies while + * in the LOW_POWER zone). On idle, we will hit the timeout and drop + * to the next level quickly, and conversely if busy we expect to + * hit a waitboost and rapidly switch into max power. 
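+ * Hence rps->last_adj is cleared below whenever the adjustment
+ * direction opposes the current power mode.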
+ */ + if ((adj < 0 && rps->power.mode == HIGH_POWER) || + (adj > 0 && rps->power.mode == LOW_POWER)) + rps->last_adj = 0; + + /* sysfs frequency interfaces may have snuck in while servicing the + * interrupt + */ + new_freq += adj; + new_freq = clamp_t(int, new_freq, min, max); + + if (intel_rps_set(rps, new_freq)) { + DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); + rps->last_adj = 0; + } + + mutex_unlock(&rps->lock); + +out: + spin_lock_irq(>->irq_lock); + gen6_gt_pm_unmask_irq(gt, rps->pm_events); + spin_unlock_irq(>->irq_lock); +} + +void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) +{ + struct intel_gt *gt = rps_to_gt(rps); + const u32 events = rps->pm_events & pm_iir; + + lockdep_assert_held(>->irq_lock); + + if (unlikely(!events)) + return; + + gen6_gt_pm_mask_irq(gt, events); + + rps->pm_iir |= events; + schedule_work(&rps->work); +} + +void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) +{ + struct intel_gt *gt = rps_to_gt(rps); + + if (pm_iir & rps->pm_events) { + spin_lock(>->irq_lock); + gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events); + rps->pm_iir |= pm_iir & rps->pm_events; + schedule_work(&rps->work); + spin_unlock(>->irq_lock); + } + + if (INTEL_GEN(gt->i915) >= 8) + return; + + if (pm_iir & PM_VEBOX_USER_INTERRUPT) + intel_engine_breadcrumbs_irq(gt->engine[VECS0]); + + if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) + DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); +} + +void gen5_rps_irq_handler(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u32 busy_up, busy_down, max_avg, min_avg; + u8 new_freq; + + spin_lock(&mchdev_lock); + + intel_uncore_write16(uncore, + MEMINTRSTS, + intel_uncore_read(uncore, MEMINTRSTS)); + + intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); + busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG); + busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG); + max_avg = intel_uncore_read(uncore, RCBMAXAVG); + min_avg = intel_uncore_read(uncore, RCBMINAVG); + + /* Handle RCS change request from hw */ + new_freq = rps->cur_freq; + if (busy_up > max_avg) + new_freq++; + else if (busy_down < min_avg) + new_freq--; + new_freq = clamp(new_freq, + rps->min_freq_softlimit, + rps->max_freq_softlimit); + + if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq)) + rps->cur_freq = new_freq; + + spin_unlock(&mchdev_lock); +} + +void intel_rps_init_early(struct intel_rps *rps) +{ + mutex_init(&rps->lock); + mutex_init(&rps->power.mutex); + + INIT_WORK(&rps->work, rps_work); + + atomic_set(&rps->num_waiters, 0); +} + +void intel_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + if (IS_CHERRYVIEW(i915)) + chv_rps_init(rps); + else if (IS_VALLEYVIEW(i915)) + vlv_rps_init(rps); + else if (INTEL_GEN(i915) >= 6) + gen6_rps_init(rps); + else if (IS_IRONLAKE_M(i915)) + gen5_rps_init(rps); + + /* Derive initial user preferences/limits from the hardware limits */ + rps->max_freq_softlimit = rps->max_freq; + rps->min_freq_softlimit = rps->min_freq; + + /* After setting max-softlimit, find the overclock max freq */ + if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) { + u32 params = 0; + + sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS, + ¶ms, NULL); + if (params & BIT(31)) { /* OC supported */ + DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n", + (rps->max_freq & 0xff) * 50, + (params & 0xff) * 50); + rps->max_freq = params & 0xff; + } + } + + /* Finally allow us to boost to max by default */ + rps->boost_freq = 
rps->max_freq; + rps->idle_freq = rps->min_freq; + rps->cur_freq = rps->idle_freq; + + rps->pm_intrmsk_mbz = 0; + + /* + * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer + * if GEN6_PM_UP_EI_EXPIRED is masked. + * + * TODO: verify if this can be reproduced on VLV,CHV. + */ + if (INTEL_GEN(i915) <= 7) + rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; + + if (INTEL_GEN(i915) >= 8) + rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; +} + +u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 cagf; + + if (INTEL_GEN(i915) >= 9) + cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; + else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) + cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; + else + cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; + + return cagf; +} + +/* External interface for intel_ips.ko */ + +static struct drm_i915_private __rcu *ips_mchdev; + +/** + * Tells the intel_ips driver that the i915 driver is now loaded, if + * IPS got loaded first. + * + * This awkward dance is so that neither module has to depend on the + * other in order for IPS to do the appropriate communication of + * GPU turbo limits to i915. + */ +static void +ips_ping_for_i915_load(void) +{ + void (*link)(void); + + link = symbol_get(ips_link_to_i915_driver); + if (link) { + link(); + symbol_put(ips_link_to_i915_driver); + } +} + +void intel_rps_driver_register(struct intel_rps *rps) +{ + struct intel_gt *gt = rps_to_gt(rps); + + /* + * We only register the i915 ips part with intel-ips once everything is + * set up, to avoid intel-ips sneaking in and reading bogus values. + */ + if (IS_GEN(gt->i915, 5)) { + rcu_assign_pointer(ips_mchdev, gt->i915); + ips_ping_for_i915_load(); + } +} + +void intel_rps_driver_unregister(struct intel_rps *rps) +{ + rcu_assign_pointer(ips_mchdev, NULL); +} + +static struct drm_i915_private *mchdev_get(void) +{ + struct drm_i915_private *i915; + + rcu_read_lock(); + i915 = rcu_dereference(ips_mchdev); + if (!kref_get_unless_zero(&i915->drm.ref)) + i915 = NULL; + rcu_read_unlock(); + + return i915; +} + +/** + * i915_read_mch_val - return value for IPS use + * + * Calculate and return a value for the IPS driver to use when deciding whether + * we have thermal and power headroom to increase CPU or GPU power budget. + */ +unsigned long i915_read_mch_val(void) +{ + struct drm_i915_private *i915; + unsigned long chipset_val = 0; + unsigned long graphics_val = 0; + intel_wakeref_t wakeref; + + i915 = mchdev_get(); + if (!i915) + return 0; + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + struct intel_ips *ips = &i915->gt.rps.ips; + + spin_lock_irq(&mchdev_lock); + chipset_val = __ips_chipset_val(ips); + graphics_val = __ips_gfx_val(ips); + spin_unlock_irq(&mchdev_lock); + } + + drm_dev_put(&i915->drm); + return chipset_val + graphics_val; +} +EXPORT_SYMBOL_GPL(i915_read_mch_val); + +/** + * i915_gpu_raise - raise GPU frequency limit + * + * Raise the limit; IPS indicates we have thermal headroom. 
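+ *
+ * Return: false if no registered i915 device could be referenced (see
+ * mchdev_get()), true otherwise. The soft limit is only bumped while it
+ * is still below the hardware maximum.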
+ */ +bool i915_gpu_raise(void) +{ + struct drm_i915_private *i915; + struct intel_rps *rps; + + i915 = mchdev_get(); + if (!i915) + return false; + + rps = &i915->gt.rps; + + spin_lock_irq(&mchdev_lock); + if (rps->max_freq_softlimit < rps->max_freq) + rps->max_freq_softlimit++; + spin_unlock_irq(&mchdev_lock); + + drm_dev_put(&i915->drm); + return true; +} +EXPORT_SYMBOL_GPL(i915_gpu_raise); + +/** + * i915_gpu_lower - lower GPU frequency limit + * + * IPS indicates we're close to a thermal limit, so throttle back the GPU + * frequency maximum. + */ +bool i915_gpu_lower(void) +{ + struct drm_i915_private *i915; + struct intel_rps *rps; + + i915 = mchdev_get(); + if (!i915) + return false; + + rps = &i915->gt.rps; + + spin_lock_irq(&mchdev_lock); + if (rps->max_freq_softlimit > rps->min_freq) + rps->max_freq_softlimit--; + spin_unlock_irq(&mchdev_lock); + + drm_dev_put(&i915->drm); + return true; +} +EXPORT_SYMBOL_GPL(i915_gpu_lower); + +/** + * i915_gpu_busy - indicate GPU business to IPS + * + * Tell the IPS driver whether or not the GPU is busy. + */ +bool i915_gpu_busy(void) +{ + struct drm_i915_private *i915; + bool ret; + + i915 = mchdev_get(); + if (!i915) + return false; + + ret = i915->gt.awake; + + drm_dev_put(&i915->drm); + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_busy); + +/** + * i915_gpu_turbo_disable - disable graphics turbo + * + * Disable graphics turbo by resetting the max frequency and setting the + * current frequency to the default. + */ +bool i915_gpu_turbo_disable(void) +{ + struct drm_i915_private *i915; + struct intel_rps *rps; + bool ret; + + i915 = mchdev_get(); + if (!i915) + return false; + + rps = &i915->gt.rps; + + spin_lock_irq(&mchdev_lock); + rps->max_freq_softlimit = rps->min_freq; + ret = gen5_rps_set(&i915->gt.rps, rps->min_freq); + spin_unlock_irq(&mchdev_lock); + + drm_dev_put(&i915->drm); + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h new file mode 100644 index 000000000000..9518c66c9792 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rps.h @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RPS_H +#define INTEL_RPS_H + +#include "intel_rps_types.h" + +struct i915_request; + +void intel_rps_init_early(struct intel_rps *rps); +void intel_rps_init(struct intel_rps *rps); + +void intel_rps_driver_register(struct intel_rps *rps); +void intel_rps_driver_unregister(struct intel_rps *rps); + +void intel_rps_enable(struct intel_rps *rps); +void intel_rps_disable(struct intel_rps *rps); + +void intel_rps_park(struct intel_rps *rps); +void intel_rps_unpark(struct intel_rps *rps); +void intel_rps_boost(struct i915_request *rq); + +int intel_rps_set(struct intel_rps *rps, u8 val); +void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive); + +int intel_gpu_freq(struct intel_rps *rps, int val); +int intel_freq_opcode(struct intel_rps *rps, int val); +u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat1); + +void gen5_rps_irq_handler(struct intel_rps *rps); +void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir); +void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir); + +#endif /* INTEL_RPS_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h new file mode 100644 index 000000000000..c2e279154bd5 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: MIT + * + 
* Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RPS_TYPES_H +#define INTEL_RPS_TYPES_H + +#include <linux/atomic.h> +#include <linux/ktime.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +struct intel_ips { + u64 last_count1; + unsigned long last_time1; + unsigned long chipset_power; + u64 last_count2; + u64 last_time2; + unsigned long gfx_power; + u8 corr; + + int c, m; +}; + +struct intel_rps_ei { + ktime_t ktime; + u32 render_c0; + u32 media_c0; +}; + +struct intel_rps { + struct mutex lock; /* protects enabling and the worker */ + + /* + * work, interrupts_enabled and pm_iir are protected by + * dev_priv->irq_lock + */ + struct work_struct work; + bool enabled; + bool active; + u32 pm_iir; + + /* PM interrupt bits that should never be masked */ + u32 pm_intrmsk_mbz; + u32 pm_events; + + /* Frequencies are stored in potentially platform dependent multiples. + * In other words, *_freq needs to be multiplied by X to be interesting. + * Soft limits are those which are used for the dynamic reclocking done + * by the driver (raise frequencies under heavy loads, and lower for + * lighter loads). Hard limits are those imposed by the hardware. + * + * A distinction is made for overclocking, which is never enabled by + * default, and is considered to be above the hard limit if it's + * possible at all. + */ + u8 cur_freq; /* Current frequency (cached, may not == HW) */ + u8 last_freq; /* Last SWREQ frequency */ + u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ + u8 max_freq_softlimit; /* Max frequency permitted by the driver */ + u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ + u8 min_freq; /* AKA RPn. Minimum frequency */ + u8 boost_freq; /* Frequency to request when wait boosting */ + u8 idle_freq; /* Frequency to request when we are idle */ + u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ + u8 rp1_freq; /* "less than" RP0 power/freqency */ + u8 rp0_freq; /* Non-overclocked max frequency. 
*/ + u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ + + int last_adj; + + struct { + struct mutex mutex; + + enum { LOW_POWER, BETWEEN, HIGH_POWER } mode; + unsigned int interactive; + + u8 up_threshold; /* Current %busy required to uplock */ + u8 down_threshold; /* Current %busy required to downclock */ + } power; + + atomic_t num_waiters; + atomic_t boosts; + + /* manual wa residency calculations */ + struct intel_rps_ei ei; + struct intel_ips ips; +}; + +#endif /* INTEL_RPS_TYPES_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c new file mode 100644 index 000000000000..74f793423231 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -0,0 +1,194 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_lrc_reg.h" +#include "intel_sseu.h" + +void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, + u8 max_subslices, u8 max_eus_per_subslice) +{ + sseu->max_slices = max_slices; + sseu->max_subslices = max_subslices; + sseu->max_eus_per_subslice = max_eus_per_subslice; + + sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices); + GEM_BUG_ON(sseu->ss_stride > GEN_MAX_SUBSLICE_STRIDE); + sseu->eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); + GEM_BUG_ON(sseu->eu_stride > GEN_MAX_EU_STRIDE); +} + +unsigned int +intel_sseu_subslice_total(const struct sseu_dev_info *sseu) +{ + unsigned int i, total = 0; + + for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++) + total += hweight8(sseu->subslice_mask[i]); + + return total; +} + +u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice) +{ + int i, offset = slice * sseu->ss_stride; + u32 mask = 0; + + GEM_BUG_ON(slice >= sseu->max_slices); + + for (i = 0; i < sseu->ss_stride; i++) + mask |= (u32)sseu->subslice_mask[offset + i] << + i * BITS_PER_BYTE; + + return mask; +} + +void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, + u32 ss_mask) +{ + int offset = slice * sseu->ss_stride; + + memcpy(&sseu->subslice_mask[offset], &ss_mask, sseu->ss_stride); +} + +unsigned int +intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice) +{ + return hweight32(intel_sseu_get_subslices(sseu, slice)); +} + +u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, + const struct intel_sseu *req_sseu) +{ + const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; + bool subslice_pg = sseu->has_subslice_pg; + struct intel_sseu ctx_sseu; + u8 slices, subslices; + u32 rpcs = 0; + + /* + * No explicit RPCS request is needed to ensure full + * slice/subslice/EU enablement prior to Gen9. + */ + if (INTEL_GEN(i915) < 9) + return 0; + + /* + * If i915/perf is active, we want a stable powergating configuration + * on the system. + * + * We could choose full enablement, but on ICL we know there are use + * cases which disable slices for functional, apart for performance + * reasons. So in this case we select a known stable subset. + */ + if (!i915->perf.exclusive_stream) { + ctx_sseu = *req_sseu; + } else { + ctx_sseu = intel_sseu_from_device_info(sseu); + + if (IS_GEN(i915, 11)) { + /* + * We only need subslice count so it doesn't matter + * which ones we select - just turn off low bits in the + * amount of half of all available subslices per slice. 
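+			 *
+			 * For example, with all eight ICL subslices enabled,
+			 * hweight8() == 8 and the expression below evaluates
+			 * to ~(~0 << 4) == 0xf, i.e. only the four lowest
+			 * subslices remain selected.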
+ */ + ctx_sseu.subslice_mask = + ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2)); + ctx_sseu.slice_mask = 0x1; + } + } + + slices = hweight8(ctx_sseu.slice_mask); + subslices = hweight8(ctx_sseu.subslice_mask); + + /* + * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits + * wide and Icelake has up to eight subslices, specfial programming is + * needed in order to correctly enable all subslices. + * + * According to documentation software must consider the configuration + * as 2x4x8 and hardware will translate this to 1x8x8. + * + * Furthemore, even though SScount is three bits, maximum documented + * value for it is four. From this some rules/restrictions follow: + * + * 1. + * If enabled subslice count is greater than four, two whole slices must + * be enabled instead. + * + * 2. + * When more than one slice is enabled, hardware ignores the subslice + * count altogether. + * + * From these restrictions it follows that it is not possible to enable + * a count of subslices between the SScount maximum of four restriction, + * and the maximum available number on a particular SKU. Either all + * subslices are enabled, or a count between one and four on the first + * slice. + */ + if (IS_GEN(i915, 11) && + slices == 1 && + subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) { + GEM_BUG_ON(subslices & 1); + + subslice_pg = false; + slices *= 2; + } + + /* + * Starting in Gen9, render power gating can leave + * slice/subslice/EU in a partially enabled state. We + * must make an explicit request through RPCS for full + * enablement. + */ + if (sseu->has_slice_pg) { + u32 mask, val = slices; + + if (INTEL_GEN(i915) >= 11) { + mask = GEN11_RPCS_S_CNT_MASK; + val <<= GEN11_RPCS_S_CNT_SHIFT; + } else { + mask = GEN8_RPCS_S_CNT_MASK; + val <<= GEN8_RPCS_S_CNT_SHIFT; + } + + GEM_BUG_ON(val & ~mask); + val &= mask; + + rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | val; + } + + if (subslice_pg) { + u32 val = subslices; + + val <<= GEN8_RPCS_SS_CNT_SHIFT; + + GEM_BUG_ON(val & ~GEN8_RPCS_SS_CNT_MASK); + val &= GEN8_RPCS_SS_CNT_MASK; + + rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val; + } + + if (sseu->has_eu_pg) { + u32 val; + + val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT; + GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK); + val &= GEN8_RPCS_EU_MIN_MASK; + + rpcs |= val; + + val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT; + GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK); + val &= GEN8_RPCS_EU_MAX_MASK; + + rpcs |= val; + + rpcs |= GEN8_RPCS_ENABLE; + } + + return rpcs; +} diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h new file mode 100644 index 000000000000..d1d225204f09 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_SSEU_H__ +#define __INTEL_SSEU_H__ + +#include <linux/types.h> +#include <linux/kernel.h> + +#include "i915_gem.h" + +struct drm_i915_private; + +#define GEN_MAX_SLICES (6) /* CNL upper bound */ +#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ +#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE) +#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES) +#define GEN_MAX_EUS (16) /* TGL upper bound */ +#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS) + +struct sseu_dev_info { + u8 slice_mask; + u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE]; + u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES * 
GEN_MAX_EU_STRIDE]; + u16 eu_total; + u8 eu_per_subslice; + u8 min_eu_in_pool; + /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */ + u8 subslice_7eu[3]; + u8 has_slice_pg:1; + u8 has_subslice_pg:1; + u8 has_eu_pg:1; + + /* Topology fields */ + u8 max_slices; + u8 max_subslices; + u8 max_eus_per_subslice; + + u8 ss_stride; + u8 eu_stride; +}; + +/* + * Powergating configuration for a particular (context,engine). + */ +struct intel_sseu { + u8 slice_mask; + u8 subslice_mask; + u8 min_eus_per_subslice; + u8 max_eus_per_subslice; +}; + +static inline struct intel_sseu +intel_sseu_from_device_info(const struct sseu_dev_info *sseu) +{ + struct intel_sseu value = { + .slice_mask = sseu->slice_mask, + .subslice_mask = sseu->subslice_mask[0], + .min_eus_per_subslice = sseu->max_eus_per_subslice, + .max_eus_per_subslice = sseu->max_eus_per_subslice, + }; + + return value; +} + +static inline bool +intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice, + int subslice) +{ + u8 mask; + int ss_idx = subslice / BITS_PER_BYTE; + + GEM_BUG_ON(ss_idx >= sseu->ss_stride); + + mask = sseu->subslice_mask[slice * sseu->ss_stride + ss_idx]; + + return mask & BIT(subslice % BITS_PER_BYTE); +} + +void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, + u8 max_subslices, u8 max_eus_per_subslice); + +unsigned int +intel_sseu_subslice_total(const struct sseu_dev_info *sseu); + +unsigned int +intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice); + +u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice); + +void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, + u32 ss_mask); + +u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, + const struct intel_sseu *req_sseu); + +#endif /* __INTEL_SSEU_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c new file mode 100644 index 000000000000..14ad10acd548 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -0,0 +1,581 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016-2018 Intel Corporation + */ + +#include "i915_drv.h" + +#include "i915_active.h" +#include "i915_syncmap.h" +#include "intel_gt.h" +#include "intel_ring.h" +#include "intel_timeline.h" + +#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit))) +#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit)) + +struct intel_timeline_hwsp { + struct intel_gt *gt; + struct intel_gt_timelines *gt_timelines; + struct list_head free_link; + struct i915_vma *vma; + u64 free_bitmap; +}; + +struct intel_timeline_cacheline { + struct i915_active active; + struct intel_timeline_hwsp *hwsp; + void *vaddr; +#define CACHELINE_BITS 6 +#define CACHELINE_FREE CACHELINE_BITS +}; + +static struct i915_vma *__hwsp_alloc(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) + i915_gem_object_put(obj); + + return vma; +} + +static struct i915_vma * +hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline) +{ + struct intel_gt_timelines *gt = &timeline->gt->timelines; + struct intel_timeline_hwsp *hwsp; + + BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE); + + spin_lock_irq(>->hwsp_lock); + + /* 
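+	 * Each page-sized HWSP provides one cacheline per bit of its
+	 * free_bitmap (64 in total, cf. the BUILD_BUG_ON above); the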
hwsp_free_list only contains HWSP that have available cachelines */ + hwsp = list_first_entry_or_null(>->hwsp_free_list, + typeof(*hwsp), free_link); + if (!hwsp) { + struct i915_vma *vma; + + spin_unlock_irq(>->hwsp_lock); + + hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL); + if (!hwsp) + return ERR_PTR(-ENOMEM); + + vma = __hwsp_alloc(timeline->gt); + if (IS_ERR(vma)) { + kfree(hwsp); + return vma; + } + + vma->private = hwsp; + hwsp->gt = timeline->gt; + hwsp->vma = vma; + hwsp->free_bitmap = ~0ull; + hwsp->gt_timelines = gt; + + spin_lock_irq(>->hwsp_lock); + list_add(&hwsp->free_link, >->hwsp_free_list); + } + + GEM_BUG_ON(!hwsp->free_bitmap); + *cacheline = __ffs64(hwsp->free_bitmap); + hwsp->free_bitmap &= ~BIT_ULL(*cacheline); + if (!hwsp->free_bitmap) + list_del(&hwsp->free_link); + + spin_unlock_irq(>->hwsp_lock); + + GEM_BUG_ON(hwsp->vma->private != hwsp); + return hwsp->vma; +} + +static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline) +{ + struct intel_gt_timelines *gt = hwsp->gt_timelines; + unsigned long flags; + + spin_lock_irqsave(>->hwsp_lock, flags); + + /* As a cacheline becomes available, publish the HWSP on the freelist */ + if (!hwsp->free_bitmap) + list_add_tail(&hwsp->free_link, >->hwsp_free_list); + + GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap)); + hwsp->free_bitmap |= BIT_ULL(cacheline); + + /* And if no one is left using it, give the page back to the system */ + if (hwsp->free_bitmap == ~0ull) { + i915_vma_put(hwsp->vma); + list_del(&hwsp->free_link); + kfree(hwsp); + } + + spin_unlock_irqrestore(>->hwsp_lock, flags); +} + +static void __idle_cacheline_free(struct intel_timeline_cacheline *cl) +{ + GEM_BUG_ON(!i915_active_is_idle(&cl->active)); + + i915_gem_object_unpin_map(cl->hwsp->vma->obj); + i915_vma_put(cl->hwsp->vma); + __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS)); + + i915_active_fini(&cl->active); + kfree(cl); +} + +__i915_active_call +static void __cacheline_retire(struct i915_active *active) +{ + struct intel_timeline_cacheline *cl = + container_of(active, typeof(*cl), active); + + i915_vma_unpin(cl->hwsp->vma); + if (ptr_test_bit(cl->vaddr, CACHELINE_FREE)) + __idle_cacheline_free(cl); +} + +static int __cacheline_active(struct i915_active *active) +{ + struct intel_timeline_cacheline *cl = + container_of(active, typeof(*cl), active); + + __i915_vma_pin(cl->hwsp->vma); + return 0; +} + +static struct intel_timeline_cacheline * +cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline) +{ + struct intel_timeline_cacheline *cl; + void *vaddr; + + GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS)); + + cl = kmalloc(sizeof(*cl), GFP_KERNEL); + if (!cl) + return ERR_PTR(-ENOMEM); + + vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + kfree(cl); + return ERR_CAST(vaddr); + } + + i915_vma_get(hwsp->vma); + cl->hwsp = hwsp; + cl->vaddr = page_pack_bits(vaddr, cacheline); + + i915_active_init(&cl->active, __cacheline_active, __cacheline_retire); + + return cl; +} + +static void cacheline_acquire(struct intel_timeline_cacheline *cl) +{ + if (cl) + i915_active_acquire(&cl->active); +} + +static void cacheline_release(struct intel_timeline_cacheline *cl) +{ + if (cl) + i915_active_release(&cl->active); +} + +static void cacheline_free(struct intel_timeline_cacheline *cl) +{ + GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE)); + cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE); + + if (i915_active_is_idle(&cl->active)) + __idle_cacheline_free(cl); +} + +int 
intel_timeline_init(struct intel_timeline *timeline, + struct intel_gt *gt, + struct i915_vma *hwsp) +{ + void *vaddr; + + kref_init(&timeline->kref); + atomic_set(&timeline->pin_count, 0); + + timeline->gt = gt; + + timeline->has_initial_breadcrumb = !hwsp; + timeline->hwsp_cacheline = NULL; + + if (!hwsp) { + struct intel_timeline_cacheline *cl; + unsigned int cacheline; + + hwsp = hwsp_alloc(timeline, &cacheline); + if (IS_ERR(hwsp)) + return PTR_ERR(hwsp); + + cl = cacheline_alloc(hwsp->private, cacheline); + if (IS_ERR(cl)) { + __idle_hwsp_free(hwsp->private, cacheline); + return PTR_ERR(cl); + } + + timeline->hwsp_cacheline = cl; + timeline->hwsp_offset = cacheline * CACHELINE_BYTES; + + vaddr = page_mask_bits(cl->vaddr); + } else { + timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR; + + vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + } + + timeline->hwsp_seqno = + memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES); + + timeline->hwsp_ggtt = i915_vma_get(hwsp); + GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size); + + timeline->fence_context = dma_fence_context_alloc(1); + + mutex_init(&timeline->mutex); + + INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex); + INIT_LIST_HEAD(&timeline->requests); + + i915_syncmap_init(&timeline->sync); + + return 0; +} + +static void timelines_init(struct intel_gt *gt) +{ + struct intel_gt_timelines *timelines = >->timelines; + + spin_lock_init(&timelines->lock); + INIT_LIST_HEAD(&timelines->active_list); + + spin_lock_init(&timelines->hwsp_lock); + INIT_LIST_HEAD(&timelines->hwsp_free_list); +} + +void intel_timelines_init(struct drm_i915_private *i915) +{ + timelines_init(&i915->gt); +} + +void intel_timeline_fini(struct intel_timeline *timeline) +{ + GEM_BUG_ON(atomic_read(&timeline->pin_count)); + GEM_BUG_ON(!list_empty(&timeline->requests)); + + if (timeline->hwsp_cacheline) + cacheline_free(timeline->hwsp_cacheline); + else + i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj); + + i915_vma_put(timeline->hwsp_ggtt); +} + +struct intel_timeline * +intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp) +{ + struct intel_timeline *timeline; + int err; + + timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); + if (!timeline) + return ERR_PTR(-ENOMEM); + + err = intel_timeline_init(timeline, gt, global_hwsp); + if (err) { + kfree(timeline); + return ERR_PTR(err); + } + + return timeline; +} + +int intel_timeline_pin(struct intel_timeline *tl) +{ + int err; + + if (atomic_add_unless(&tl->pin_count, 1, 0)) + return 0; + + err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (err) + return err; + + tl->hwsp_offset = + i915_ggtt_offset(tl->hwsp_ggtt) + + offset_in_page(tl->hwsp_offset); + + cacheline_acquire(tl->hwsp_cacheline); + if (atomic_fetch_inc(&tl->pin_count)) { + cacheline_release(tl->hwsp_cacheline); + __i915_vma_unpin(tl->hwsp_ggtt); + } + + return 0; +} + +void intel_timeline_enter(struct intel_timeline *tl) +{ + struct intel_gt_timelines *timelines = &tl->gt->timelines; + unsigned long flags; + + lockdep_assert_held(&tl->mutex); + + GEM_BUG_ON(!atomic_read(&tl->pin_count)); + if (tl->active_count++) + return; + GEM_BUG_ON(!tl->active_count); /* overflow? 
*/ + + spin_lock_irqsave(&timelines->lock, flags); + list_add(&tl->link, &timelines->active_list); + spin_unlock_irqrestore(&timelines->lock, flags); +} + +void intel_timeline_exit(struct intel_timeline *tl) +{ + struct intel_gt_timelines *timelines = &tl->gt->timelines; + unsigned long flags; + + lockdep_assert_held(&tl->mutex); + + GEM_BUG_ON(!tl->active_count); + if (--tl->active_count) + return; + + spin_lock_irqsave(&timelines->lock, flags); + list_del(&tl->link); + spin_unlock_irqrestore(&timelines->lock, flags); + + /* + * Since this timeline is idle, all bariers upon which we were waiting + * must also be complete and so we can discard the last used barriers + * without loss of information. + */ + i915_syncmap_free(&tl->sync); +} + +static u32 timeline_advance(struct intel_timeline *tl) +{ + GEM_BUG_ON(!atomic_read(&tl->pin_count)); + GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb); + + return tl->seqno += 1 + tl->has_initial_breadcrumb; +} + +static void timeline_rollback(struct intel_timeline *tl) +{ + tl->seqno -= 1 + tl->has_initial_breadcrumb; +} + +static noinline int +__intel_timeline_get_seqno(struct intel_timeline *tl, + struct i915_request *rq, + u32 *seqno) +{ + struct intel_timeline_cacheline *cl; + unsigned int cacheline; + struct i915_vma *vma; + void *vaddr; + int err; + + /* + * If there is an outstanding GPU reference to this cacheline, + * such as it being sampled by a HW semaphore on another timeline, + * we cannot wraparound our seqno value (the HW semaphore does + * a strict greater-than-or-equals compare, not i915_seqno_passed). + * So if the cacheline is still busy, we must detach ourselves + * from it and leave it inflight alongside its users. + * + * However, if nobody is watching and we can guarantee that nobody + * will, we could simply reuse the same cacheline. + * + * if (i915_active_request_is_signaled(&tl->last_request) && + * i915_active_is_signaled(&tl->hwsp_cacheline->active)) + * return 0; + * + * That seems unlikely for a busy timeline that needed to wrap in + * the first place, so just replace the cacheline. + */ + + vma = hwsp_alloc(tl, &cacheline); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_rollback; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (err) { + __idle_hwsp_free(vma->private, cacheline); + goto err_rollback; + } + + cl = cacheline_alloc(vma->private, cacheline); + if (IS_ERR(cl)) { + err = PTR_ERR(cl); + __idle_hwsp_free(vma->private, cacheline); + goto err_unpin; + } + GEM_BUG_ON(cl->hwsp->vma != vma); + + /* + * Attach the old cacheline to the current request, so that we only + * free it after the current request is retired, which ensures that + * all writes into the cacheline from previous requests are complete. 
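+	 *
+	 * i915_active_ref() ties the old cacheline's lifetime to rq: once
+	 * the tracked fences have signalled, __cacheline_retire() unpins
+	 * it and, as it is marked CACHELINE_FREE below, frees it.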
+ */ + err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence); + if (err) + goto err_cacheline; + + cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */ + cacheline_free(tl->hwsp_cacheline); + + i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */ + i915_vma_put(tl->hwsp_ggtt); + + tl->hwsp_ggtt = i915_vma_get(vma); + + vaddr = page_mask_bits(cl->vaddr); + tl->hwsp_offset = cacheline * CACHELINE_BYTES; + tl->hwsp_seqno = + memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES); + + tl->hwsp_offset += i915_ggtt_offset(vma); + + cacheline_acquire(cl); + tl->hwsp_cacheline = cl; + + *seqno = timeline_advance(tl); + GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno)); + return 0; + +err_cacheline: + cacheline_free(cl); +err_unpin: + i915_vma_unpin(vma); +err_rollback: + timeline_rollback(tl); + return err; +} + +int intel_timeline_get_seqno(struct intel_timeline *tl, + struct i915_request *rq, + u32 *seqno) +{ + *seqno = timeline_advance(tl); + + /* Replace the HWSP on wraparound for HW semaphores */ + if (unlikely(!*seqno && tl->hwsp_cacheline)) + return __intel_timeline_get_seqno(tl, rq, seqno); + + return 0; +} + +static int cacheline_ref(struct intel_timeline_cacheline *cl, + struct i915_request *rq) +{ + return i915_active_add_request(&cl->active, rq); +} + +int intel_timeline_read_hwsp(struct i915_request *from, + struct i915_request *to, + u32 *hwsp) +{ + struct intel_timeline *tl; + int err; + + rcu_read_lock(); + tl = rcu_dereference(from->timeline); + if (i915_request_completed(from) || !kref_get_unless_zero(&tl->kref)) + tl = NULL; + rcu_read_unlock(); + if (!tl) /* already completed */ + return 1; + + GEM_BUG_ON(rcu_access_pointer(to->timeline) == tl); + + err = -EBUSY; + if (mutex_trylock(&tl->mutex)) { + struct intel_timeline_cacheline *cl = from->hwsp_cacheline; + + if (i915_request_completed(from)) { + err = 1; + goto unlock; + } + + err = cacheline_ref(cl, to); + if (err) + goto unlock; + + if (likely(cl == tl->hwsp_cacheline)) { + *hwsp = tl->hwsp_offset; + } else { /* across a seqno wrap, recover the original offset */ + *hwsp = i915_ggtt_offset(cl->hwsp->vma) + + ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * + CACHELINE_BYTES; + } + +unlock: + mutex_unlock(&tl->mutex); + } + intel_timeline_put(tl); + + return err; +} + +void intel_timeline_unpin(struct intel_timeline *tl) +{ + GEM_BUG_ON(!atomic_read(&tl->pin_count)); + if (!atomic_dec_and_test(&tl->pin_count)) + return; + + cacheline_release(tl->hwsp_cacheline); + + __i915_vma_unpin(tl->hwsp_ggtt); +} + +void __intel_timeline_free(struct kref *kref) +{ + struct intel_timeline *timeline = + container_of(kref, typeof(*timeline), kref); + + intel_timeline_fini(timeline); + kfree_rcu(timeline, rcu); +} + +static void timelines_fini(struct intel_gt *gt) +{ + struct intel_gt_timelines *timelines = >->timelines; + + GEM_BUG_ON(!list_empty(&timelines->active_list)); + GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list)); +} + +void intel_timelines_fini(struct drm_i915_private *i915) +{ + timelines_fini(&i915->gt); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "gt/selftests/mock_timeline.c" +#include "gt/selftest_timeline.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h new file mode 100644 index 000000000000..f583af1ba18d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_timeline.h @@ -0,0 +1,94 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person 
obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef I915_TIMELINE_H +#define I915_TIMELINE_H + +#include <linux/lockdep.h> + +#include "i915_active.h" +#include "i915_syncmap.h" +#include "gt/intel_timeline_types.h" + +int intel_timeline_init(struct intel_timeline *tl, + struct intel_gt *gt, + struct i915_vma *hwsp); +void intel_timeline_fini(struct intel_timeline *tl); + +struct intel_timeline * +intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp); + +static inline struct intel_timeline * +intel_timeline_get(struct intel_timeline *timeline) +{ + kref_get(&timeline->kref); + return timeline; +} + +void __intel_timeline_free(struct kref *kref); +static inline void intel_timeline_put(struct intel_timeline *timeline) +{ + kref_put(&timeline->kref, __intel_timeline_free); +} + +static inline int __intel_timeline_sync_set(struct intel_timeline *tl, + u64 context, u32 seqno) +{ + return i915_syncmap_set(&tl->sync, context, seqno); +} + +static inline int intel_timeline_sync_set(struct intel_timeline *tl, + const struct dma_fence *fence) +{ + return __intel_timeline_sync_set(tl, fence->context, fence->seqno); +} + +static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl, + u64 context, u32 seqno) +{ + return i915_syncmap_is_later(&tl->sync, context, seqno); +} + +static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl, + const struct dma_fence *fence) +{ + return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno); +} + +int intel_timeline_pin(struct intel_timeline *tl); +void intel_timeline_enter(struct intel_timeline *tl); +int intel_timeline_get_seqno(struct intel_timeline *tl, + struct i915_request *rq, + u32 *seqno); +void intel_timeline_exit(struct intel_timeline *tl); +void intel_timeline_unpin(struct intel_timeline *tl); + +int intel_timeline_read_hwsp(struct i915_request *from, + struct i915_request *until, + u32 *hwsp_offset); + +void intel_timelines_init(struct drm_i915_private *i915); +void intel_timelines_fini(struct drm_i915_private *i915); + +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h new file mode 100644 index 000000000000..98d9ee166379 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#ifndef __I915_TIMELINE_TYPES_H__ +#define __I915_TIMELINE_TYPES_H__ + +#include <linux/list.h> +#include <linux/kref.h> 
+#include <linux/mutex.h> +#include <linux/types.h> + +#include "i915_active_types.h" + +struct drm_i915_private; +struct i915_vma; +struct intel_timeline_cacheline; +struct i915_syncmap; + +struct intel_timeline { + u64 fence_context; + u32 seqno; + + struct mutex mutex; /* protects the flow of requests */ + + /* + * pin_count and active_count track essentially the same thing: + * How many requests are in flight or may be under construction. + * + * We need two distinct counters so that we can assign different + * lifetimes to the events for different use-cases. For example, + * we want to permanently keep the timeline pinned for the kernel + * context so that we can issue requests at any time without having + * to acquire space in the GGTT. However, we want to keep tracking + * the activity (to be able to detect when we become idle) along that + * permanently pinned timeline and so end up requiring two counters. + * + * Note that the active_count is protected by the intel_timeline.mutex, + * but the pin_count is protected by a combination of serialisation + * from the intel_context caller plus internal atomicity. + */ + atomic_t pin_count; + unsigned int active_count; + + const u32 *hwsp_seqno; + struct i915_vma *hwsp_ggtt; + u32 hwsp_offset; + + struct intel_timeline_cacheline *hwsp_cacheline; + + bool has_initial_breadcrumb; + + /** + * List of breadcrumbs associated with GPU requests currently + * outstanding. + */ + struct list_head requests; + + /* + * Contains an RCU guarded pointer to the last request. No reference is + * held to the request, users must carefully acquire a reference to + * the request using i915_active_fence_get(), or manage the RCU + * protection themselves (cf the i915_active_fence API). + */ + struct i915_active_fence last_request; + + /** + * We track the most recent seqno that we wait on in every context so + * that we only have to emit a new await and dependency on a more + * recent sync point. As the contexts may be executed out-of-order, we + * have to track each individually and can not rely on an absolute + * global_seqno. When we know that all tracked fences are completed + * (i.e. when the driver is idle), we know that the syncmap is + * redundant and we can discard it without loss of generality. + */ + struct i915_syncmap *sync; + + struct list_head link; + struct intel_gt *gt; + + struct kref kref; + struct rcu_head rcu; +}; + +#endif /* __I915_TIMELINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c new file mode 100644 index 000000000000..e4bccc14602f --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -0,0 +1,1626 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_context.h" +#include "intel_gt.h" +#include "intel_ring.h" +#include "intel_workarounds.h" + +/** + * DOC: Hardware workarounds + * + * This file is intended as a central place to implement most [1]_ of the + * required workarounds for hardware to work as originally intended. They fall + * in five basic categories depending on how/when they are applied: + * + * - Workarounds that touch registers that are saved/restored to/from the HW + * context image. The list is emitted (via Load Register Immediate commands) + * everytime a new context is created. + * - GT workarounds. The list of these WAs is applied whenever these registers + * revert to default values (on GPU reset, suspend/resume [2]_, etc..). 
+ * - Display workarounds. The list is applied during display clock-gating + * initialization. + * - Workarounds that whitelist a privileged register, so that UMDs can manage + * them directly. This is just a special case of a MMMIO workaround (as we + * write the list of these to/be-whitelisted registers to some special HW + * registers). + * - Workaround batchbuffers, that get executed automatically by the hardware + * on every HW context restore. + * + * .. [1] Please notice that there are other WAs that, due to their nature, + * cannot be applied from a central place. Those are peppered around the rest + * of the code, as needed. + * + * .. [2] Technically, some registers are powercontext saved & restored, so they + * survive a suspend/resume. In practice, writing them again is not too + * costly and simplifies things. We can revisit this in the future. + * + * Layout + * ~~~~~~ + * + * Keep things in this file ordered by WA type, as per the above (context, GT, + * display, register whitelist, batchbuffer). Then, inside each type, keep the + * following order: + * + * - Infrastructure functions and macros + * - WAs per platform in standard gen/chrono order + * - Public functions to init or apply the given workaround type. + */ + +static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name) +{ + wal->name = name; + wal->engine_name = engine_name; +} + +#define WA_LIST_CHUNK (1 << 4) + +static void wa_init_finish(struct i915_wa_list *wal) +{ + /* Trim unused entries. */ + if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) { + struct i915_wa *list = kmemdup(wal->list, + wal->count * sizeof(*list), + GFP_KERNEL); + + if (list) { + kfree(wal->list); + wal->list = list; + } + } + + if (!wal->count) + return; + + DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n", + wal->wa_count, wal->name, wal->engine_name); +} + +static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) +{ + unsigned int addr = i915_mmio_reg_offset(wa->reg); + unsigned int start = 0, end = wal->count; + const unsigned int grow = WA_LIST_CHUNK; + struct i915_wa *wa_; + + GEM_BUG_ON(!is_power_of_2(grow)); + + if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. 
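+					     * The list only ever grows by
+					     * WA_LIST_CHUNK entries at a time,
+					     * so an aligned, non-zero count
+					     * means no spare slot remains.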
*/ + struct i915_wa *list; + + list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa), + GFP_KERNEL); + if (!list) { + DRM_ERROR("No space for workaround init!\n"); + return; + } + + if (wal->list) + memcpy(list, wal->list, sizeof(*wa) * wal->count); + + wal->list = list; + } + + while (start < end) { + unsigned int mid = start + (end - start) / 2; + + if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) { + start = mid + 1; + } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) { + end = mid; + } else { + wa_ = &wal->list[mid]; + + if ((wa->mask & ~wa_->mask) == 0) { + DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n", + i915_mmio_reg_offset(wa_->reg), + wa_->mask, wa_->val); + + wa_->val &= ~wa->mask; + } + + wal->wa_count++; + wa_->val |= wa->val; + wa_->mask |= wa->mask; + wa_->read |= wa->read; + return; + } + } + + wal->wa_count++; + wa_ = &wal->list[wal->count++]; + *wa_ = *wa; + + while (wa_-- > wal->list) { + GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) == + i915_mmio_reg_offset(wa_[1].reg)); + if (i915_mmio_reg_offset(wa_[1].reg) > + i915_mmio_reg_offset(wa_[0].reg)) + break; + + swap(wa_[1], wa_[0]); + } +} + +static void +wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, + u32 val) +{ + struct i915_wa wa = { + .reg = reg, + .mask = mask, + .val = val, + .read = mask, + }; + + _wa_add(wal, &wa); +} + +static void +wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +{ + wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val)); +} + +static void +wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +{ + wa_write_masked_or(wal, reg, ~0, val); +} + +static void +wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +{ + wa_write_masked_or(wal, reg, val, val); +} + +#define WA_SET_BIT_MASKED(addr, mask) \ + wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask)) + +#define WA_CLR_BIT_MASKED(addr, mask) \ + wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask)) + +#define WA_SET_FIELD_MASKED(addr, mask, value) \ + wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value))) + +static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); + + /* WaDisableAsyncFlipPerfMode:bdw,chv */ + WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE); + + /* WaDisablePartialInstShootdown:bdw,chv */ + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, + PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); + + /* Use Force Non-Coherent whenever executing a 3D context. This is a + * workaround for for a possible hang in the unlikely event a TLB + * invalidation occurs during a PSD flush. + */ + /* WaForceEnableNonCoherent:bdw,chv */ + /* WaHdcDisableFetchWhenMasked:bdw,chv */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_DONOT_FETCH_MEM_WHEN_MASKED | + HDC_FORCE_NON_COHERENT); + + /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0: + * "The Hierarchical Z RAW Stall Optimization allows non-overlapping + * polygons in the same 8x4 pixel/sample area to be processed without + * stalling waiting for the earlier ones to write to Hierarchical Z + * buffer." + * + * This optimization is off by default for BDW and CHV; turn it on. + */ + WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE); + + /* Wa4x4STCOptimizationDisable:bdw,chv */ + WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE); + + /* + * BSpec recommends 8x4 when MSAA is used, + * however in practice 16x4 seems fastest. 
+ * + * Note that PS/WM thread counts depend on the WIZ hashing + * disable bit, which we don't touch here, but it's good + * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). + */ + WA_SET_FIELD_MASKED(GEN7_GT_MODE, + GEN6_WIZ_HASHING_MASK, + GEN6_WIZ_HASHING_16x4); +} + +static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + struct drm_i915_private *i915 = engine->i915; + + gen8_ctx_workarounds_init(engine, wal); + + /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); + + /* WaDisableDopClockGating:bdw + * + * Also see the related UCGTCL1 write in broadwell_init_clock_gating() + * to disable EUTC clock gating. + */ + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, + DOP_CLOCK_GATING_DISABLE); + + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, + GEN8_SAMPLER_POWER_BYPASS_DIS); + + WA_SET_BIT_MASKED(HDC_CHICKEN0, + /* WaForceContextSaveRestoreNonCoherent:bdw */ + HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | + /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ + (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); +} + +static void chv_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + gen8_ctx_workarounds_init(engine, wal); + + /* WaDisableThreadStallDopClockGating:chv */ + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); + + /* Improve HiZ throughput on CHV. */ + WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X); +} + +static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + struct drm_i915_private *i915 = engine->i915; + + if (HAS_LLC(i915)) { + /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl + * + * Must match Display Engine. See + * WaCompressedResourceDisplayNewHashMode. + */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN9_PBE_COMPRESSED_HASH_SELECTION); + WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, + GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR); + } + + /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */ + /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */ + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, + FLOW_CONTROL_ENABLE | + PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); + + /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */ + /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */ + WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, + GEN9_ENABLE_YV12_BUGFIX | + GEN9_ENABLE_GPGPU_PREEMPTION); + + /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */ + /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */ + WA_SET_BIT_MASKED(CACHE_MODE_1, + GEN8_4x4_STC_OPTIMIZATION_DISABLE | + GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE); + + /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */ + WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, + GEN9_CCS_TLB_PREFETCH_ENABLE); + + /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | + HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE); + + /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are + * both tied to WaForceContextSaveRestoreNonCoherent + * in some hsds for skl. We keep the tie for all gen9. The + * documentation is a bit hazy and so we want to get common behaviour, + * even though there is no clear evidence we would need both on kbl/bxt. + * This area has been source of system hangs so we play it safe + * and mimic the skl regardless of what bspec says. + * + * Use Force Non-Coherent whenever executing a 3D context. 
This + * is a workaround for a possible hang in the unlikely event + * a TLB invalidation occurs during a PSD flush. + */ + + /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FORCE_NON_COHERENT); + + /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */ + if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, + GEN8_SAMPLER_POWER_BYPASS_DIS); + + /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */ + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); + + /* + * Supporting preemption with fine-granularity requires changes in the + * batch buffer programming. Since we can't break old userspace, we + * need to set our default preemption level to safe value. Userspace is + * still able to use more fine-grained preemption levels, since in + * WaEnablePreemptionGranularityControlByUMD we're whitelisting the + * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are + * not real HW workarounds, but merely a way to start using preemption + * while maintaining old contract with userspace. + */ + + /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */ + WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL); + + /* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */ + WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + GEN9_PREEMPT_GPGPU_LEVEL_MASK, + GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); + + /* WaClearHIZ_WM_CHICKEN3:bxt,glk */ + if (IS_GEN9_LP(i915)) + WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ); +} + +static void skl_tune_iz_hashing(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + struct drm_i915_private *i915 = engine->i915; + u8 vals[3] = { 0, 0, 0 }; + unsigned int i; + + for (i = 0; i < 3; i++) { + u8 ss; + + /* + * Only consider slices where one, and only one, subslice has 7 + * EUs + */ + if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i])) + continue; + + /* + * subslice_7eu[i] != 0 (because of the check above) and + * ss_max == 4 (maximum number of subslices possible per slice) + * + * -> 0 <= ss <= 3; + */ + ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1; + vals[i] = 3 - ss; + } + + if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0) + return; + + /* Tune IZ hashing. 
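+	 * Each vals[i] is 3 - ss, i.e. derived from the index of the lone
+	 * 7-EU subslice in slice i, and is programmed into the matching
+	 * GEN9_IZ_HASHING() field below.
+	 *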
See intel_device_info_runtime_init() */ + WA_SET_FIELD_MASKED(GEN7_GT_MODE, + GEN9_IZ_HASHING_MASK(2) | + GEN9_IZ_HASHING_MASK(1) | + GEN9_IZ_HASHING_MASK(0), + GEN9_IZ_HASHING(2, vals[2]) | + GEN9_IZ_HASHING(1, vals[1]) | + GEN9_IZ_HASHING(0, vals[0])); +} + +static void skl_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + gen9_ctx_workarounds_init(engine, wal); + skl_tune_iz_hashing(engine, wal); +} + +static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + gen9_ctx_workarounds_init(engine, wal); + + /* WaDisableThreadStallDopClockGating:bxt */ + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, + STALL_DOP_GATING_DISABLE); + + /* WaToEnableHwFixForPushConstHWBug:bxt */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); +} + +static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + struct drm_i915_private *i915 = engine->i915; + + gen9_ctx_workarounds_init(engine, wal); + + /* WaToEnableHwFixForPushConstHWBug:kbl */ + if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER)) + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + + /* WaDisableSbeCacheDispatchPortSharing:kbl */ + WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1, + GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); +} + +static void glk_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + gen9_ctx_workarounds_init(engine, wal); + + /* WaToEnableHwFixForPushConstHWBug:glk */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); +} + +static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + gen9_ctx_workarounds_init(engine, wal); + + /* WaToEnableHwFixForPushConstHWBug:cfl */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + + /* WaDisableSbeCacheDispatchPortSharing:cfl */ + WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1, + GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); +} + +static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + struct drm_i915_private *i915 = engine->i915; + + /* WaForceContextSaveRestoreNonCoherent:cnl */ + WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0, + HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT); + + /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */ + if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0)) + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5); + + /* WaDisableReplayBufferBankArbitrationOptimization:cnl */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + + /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */ + if (IS_CNL_REVID(i915, 0, CNL_REVID_B0)) + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE); + + /* WaPushConstantDereferenceHoldDisable:cnl */ + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE); + + /* FtrEnableFastAnisoL1BankingFix:cnl */ + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX); + + /* WaDisable3DMidCmdPreemption:cnl */ + WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL); + + /* WaDisableGPGPUMidCmdPreemption:cnl */ + WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + GEN9_PREEMPT_GPGPU_LEVEL_MASK, + GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); + + /* WaDisableEarlyEOT:cnl */ + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT); +} + +static void icl_ctx_workarounds_init(struct intel_engine_cs 
*engine, + struct i915_wa_list *wal) +{ + struct drm_i915_private *i915 = engine->i915; + + /* WaDisableBankHangMode:icl */ + wa_write(wal, + GEN8_L3CNTLREG, + intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) | + GEN8_ERRDETBCTRL); + + /* Wa_1604370585:icl (pre-prod) + * Formerly known as WaPushConstantDereferenceHoldDisable + */ + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, + PUSH_CONSTANT_DEREF_DISABLE); + + /* WaForceEnableNonCoherent:icl + * This is not the same workaround as in early Gen9 platforms, where + * lacking this could cause system hangs, but coherency performance + * overhead is high and only a few compute workloads really need it + * (the register is whitelisted in hardware now, so UMDs can opt in + * for coherency if they have a good reason). + */ + WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT); + + /* Wa_2006611047:icl (pre-prod) + * Formerly known as WaDisableImprovedTdlClkGating + */ + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, + GEN11_TDL_CLOCK_GATING_FIX_DISABLE); + + /* Wa_2006665173:icl (pre-prod) */ + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) + WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, + GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC); + + /* WaEnableFloatBlendOptimization:icl */ + wa_write_masked_or(wal, + GEN10_CACHE_MODE_SS, + 0, /* write-only, so skip validation */ + _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE)); + + /* WaDisableGPGPUMidThreadPreemption:icl */ + WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + GEN9_PREEMPT_GPGPU_LEVEL_MASK, + GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL); + + /* allow headerless messages for preemptible GPGPU context */ + WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE, + GEN11_SAMPLER_ENABLE_HEADLESS_MSG); +} + +static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + /* Wa_1409142259:tgl */ + WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, + GEN12_DISABLE_CPS_AWARE_COLOR_PIPE); +} + +static void +__intel_engine_init_ctx_wa(struct intel_engine_cs *engine, + struct i915_wa_list *wal, + const char *name) +{ + struct drm_i915_private *i915 = engine->i915; + + if (engine->class != RENDER_CLASS) + return; + + wa_init_start(wal, name, engine->name); + + if (IS_GEN(i915, 12)) + tgl_ctx_workarounds_init(engine, wal); + else if (IS_GEN(i915, 11)) + icl_ctx_workarounds_init(engine, wal); + else if (IS_CANNONLAKE(i915)) + cnl_ctx_workarounds_init(engine, wal); + else if (IS_COFFEELAKE(i915)) + cfl_ctx_workarounds_init(engine, wal); + else if (IS_GEMINILAKE(i915)) + glk_ctx_workarounds_init(engine, wal); + else if (IS_KABYLAKE(i915)) + kbl_ctx_workarounds_init(engine, wal); + else if (IS_BROXTON(i915)) + bxt_ctx_workarounds_init(engine, wal); + else if (IS_SKYLAKE(i915)) + skl_ctx_workarounds_init(engine, wal); + else if (IS_CHERRYVIEW(i915)) + chv_ctx_workarounds_init(engine, wal); + else if (IS_BROADWELL(i915)) + bdw_ctx_workarounds_init(engine, wal); + else if (INTEL_GEN(i915) < 8) + return; + else + MISSING_CASE(INTEL_GEN(i915)); + + wa_init_finish(wal); +} + +void intel_engine_init_ctx_wa(struct intel_engine_cs *engine) +{ + __intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context"); +} + +int intel_engine_emit_ctx_wa(struct i915_request *rq) +{ + struct i915_wa_list *wal = &rq->engine->ctx_wa_list; + struct i915_wa *wa; + unsigned int i; + u32 *cs; + int ret; + + if (wal->count == 0) + return 0; + + ret = rq->engine->emit_flush(rq, EMIT_BARRIER); + if (ret) + return ret; + + cs = 
intel_ring_begin(rq, (wal->count * 2 + 2)); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(wal->count); + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + *cs++ = i915_mmio_reg_offset(wa->reg); + *cs++ = wa->val; + } + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + + ret = rq->engine->emit_flush(rq, EMIT_BARRIER); + if (ret) + return ret; + + return 0; +} + +static void +gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + /* WaDisableKillLogic:bxt,skl,kbl */ + if (!IS_COFFEELAKE(i915)) + wa_write_or(wal, + GAM_ECOCHK, + ECOCHK_DIS_TLB); + + if (HAS_LLC(i915)) { + /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl + * + * Must match Display Engine. See + * WaCompressedResourceDisplayNewHashMode. + */ + wa_write_or(wal, + MMCD_MISC_CTRL, + MMCD_PCLA | MMCD_HOTSPOT_EN); + } + + /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */ + wa_write_or(wal, + GAM_ECOCHK, + BDW_DISABLE_HDC_INVALIDATION); +} + +static void +skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + gen9_gt_workarounds_init(i915, wal); + + /* WaDisableGafsUnitClkGating:skl */ + wa_write_or(wal, + GEN7_UCGCTL4, + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + + /* WaInPlaceDecompressionHang:skl */ + if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER)) + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); +} + +static void +bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + gen9_gt_workarounds_init(i915, wal); + + /* WaInPlaceDecompressionHang:bxt */ + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); +} + +static void +kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + gen9_gt_workarounds_init(i915, wal); + + /* WaDisableDynamicCreditSharing:kbl */ + if (IS_KBL_REVID(i915, 0, KBL_REVID_B0)) + wa_write_or(wal, + GAMT_CHKN_BIT_REG, + GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); + + /* WaDisableGafsUnitClkGating:kbl */ + wa_write_or(wal, + GEN7_UCGCTL4, + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + + /* WaInPlaceDecompressionHang:kbl */ + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); +} + +static void +glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + gen9_gt_workarounds_init(i915, wal); +} + +static void +cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + gen9_gt_workarounds_init(i915, wal); + + /* WaDisableGafsUnitClkGating:cfl */ + wa_write_or(wal, + GEN7_UCGCTL4, + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + + /* WaInPlaceDecompressionHang:cfl */ + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); +} + +static void +wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; + unsigned int slice, subslice; + u32 l3_en, mcr, mcr_mask; + + GEM_BUG_ON(INTEL_GEN(i915) < 10); + + /* + * WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl + * L3Banks could be fused off in single slice scenario. If that is + * the case, we might need to program MCR select to a valid L3Bank + * by default, to make sure we correctly read certain registers + * later on (in the range 0xB100 - 0xB3FF). + * + * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl + * Before any MMIO read into slice/subslice specific registers, MCR + * packet control register needs to be programmed to point to any + * enabled s/ss pair. 
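For reference, intel_engine_emit_ctx_wa() above packs the whole context workaround list into a single MI_LOAD_REGISTER_IMM packet: one header dword carrying the register count, an (offset, value) pair per workaround, and a trailing MI_NOOP so the emitted dword count stays even. A minimal userspace sketch of that packing follows; FAKE_MI_* and emit_lri() are illustrative stand-ins, not the kernel's actual command encodings.

/* Illustrative only: FAKE_MI_* values approximate, but do not claim to be, the real opcodes. */
#include <stdint.h>
#include <stdio.h>

struct wa { uint32_t reg; uint32_t val; };

#define FAKE_MI_LOAD_REGISTER_IMM(n)  (0x22u << 23 | (2u * (n) - 1))
#define FAKE_MI_NOOP                  0u

/* Emit count*2 + 2 dwords: header, (offset, value) pairs, trailing NOOP. */
static size_t emit_lri(uint32_t *cs, const struct wa *list, unsigned int count)
{
	uint32_t *p = cs;
	unsigned int i;

	*p++ = FAKE_MI_LOAD_REGISTER_IMM(count);
	for (i = 0; i < count; i++) {
		*p++ = list[i].reg;   /* mmio offset */
		*p++ = list[i].val;   /* value to load */
	}
	*p++ = FAKE_MI_NOOP;          /* keep the total dword count even */

	return (size_t)(p - cs);
}

int main(void)
{
	const struct wa wal[] = { { 0x7300, 0x00010001 }, { 0xe4f0, 0x01000100 } };
	uint32_t cs[2 * 2 + 2];
	size_t n = emit_lri(cs, wal, 2);

	for (size_t i = 0; i < n; i++)
		printf("dword %zu: 0x%08x\n", i, (unsigned int)cs[i]);
	return 0;
}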
Otherwise, incorrect values will be returned. + * This means each subsequent MMIO read will be forwarded to an + * specific s/ss combination, but this is OK since these registers + * are consistent across s/ss in almost all cases. In the rare + * occasions, such as INSTDONE, where this value is dependent + * on s/ss combo, the read should be done with read_subslice_reg. + * + * Since GEN8_MCR_SELECTOR contains dual-purpose bits which select both + * to which subslice, or to which L3 bank, the respective mmio reads + * will go, we have to find a common index which works for both + * accesses. + * + * Case where we cannot find a common index fortunately should not + * happen in production hardware, so we only emit a warning instead of + * implementing something more complex that requires checking the range + * of every MMIO read. + */ + + if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) { + u32 l3_fuse = + intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) & + GEN10_L3BANK_MASK; + + DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse); + l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse); + } else { + l3_en = ~0; + } + + slice = fls(sseu->slice_mask) - 1; + subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice)); + if (!subslice) { + DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n", + intel_sseu_get_subslices(sseu, slice), l3_en); + subslice = fls(l3_en); + WARN_ON(!subslice); + } + subslice--; + + if (INTEL_GEN(i915) >= 11) { + mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice); + mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK; + } else { + mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice); + mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK; + } + + DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr); + + wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr); +} + +static void +cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + wa_init_mcr(i915, wal); + + /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */ + if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0)) + wa_write_or(wal, + GAMT_CHKN_BIT_REG, + GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT); + + /* WaInPlaceDecompressionHang:cnl */ + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); +} + +static void +icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + wa_init_mcr(i915, wal); + + /* WaInPlaceDecompressionHang:icl */ + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + + /* WaModifyGamTlbPartitioning:icl */ + wa_write_masked_or(wal, + GEN11_GACB_PERF_CTRL, + GEN11_HASH_CTRL_MASK, + GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); + + /* Wa_1405766107:icl + * Formerly known as WaCL2SFHalfMaxAlloc + */ + wa_write_or(wal, + GEN11_LSN_UNSLCVC, + GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | + GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); + + /* Wa_220166154:icl + * Formerly known as WaDisCtxReload + */ + wa_write_or(wal, + GEN8_GAMW_ECO_DEV_RW_IA, + GAMW_ECO_DEV_CTX_RELOAD_DISABLE); + + /* Wa_1405779004:icl (pre-prod) */ + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) + wa_write_or(wal, + SLICE_UNIT_LEVEL_CLKGATE, + MSCUNIT_CLKGATE_DIS); + + /* Wa_1406680159:icl */ + wa_write_or(wal, + SUBSLICE_UNIT_LEVEL_CLKGATE, + GWUNIT_CLKGATE_DIS); + + /* Wa_1406838659:icl (pre-prod) */ + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) + wa_write_or(wal, + INF_UNIT_LEVEL_CLKGATE, + CGPSF_CLKGATE_DIS); + + /* Wa_1406463099:icl + * Formerly 
known as WaGamTlbPendError + */ + wa_write_or(wal, + GAMT_CHKN_BIT_REG, + GAMT_CHKN_DISABLE_L3_COH_PIPE); + + /* Wa_1607087056:icl */ + wa_write_or(wal, + SLICE_UNIT_LEVEL_CLKGATE, + L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); +} + +static void +tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + /* Wa_1409420604:tgl */ + if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) + wa_write_or(wal, + SUBSLICE_UNIT_LEVEL_CLKGATE2, + CPSSUNIT_CLKGATE_DIS); + + /* Wa_1409180338:tgl */ + if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) + wa_write_or(wal, + SLICE_UNIT_LEVEL_CLKGATE, + L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); +} + +static void +gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + if (IS_GEN(i915, 12)) + tgl_gt_workarounds_init(i915, wal); + else if (IS_GEN(i915, 11)) + icl_gt_workarounds_init(i915, wal); + else if (IS_CANNONLAKE(i915)) + cnl_gt_workarounds_init(i915, wal); + else if (IS_COFFEELAKE(i915)) + cfl_gt_workarounds_init(i915, wal); + else if (IS_GEMINILAKE(i915)) + glk_gt_workarounds_init(i915, wal); + else if (IS_KABYLAKE(i915)) + kbl_gt_workarounds_init(i915, wal); + else if (IS_BROXTON(i915)) + bxt_gt_workarounds_init(i915, wal); + else if (IS_SKYLAKE(i915)) + skl_gt_workarounds_init(i915, wal); + else if (INTEL_GEN(i915) <= 8) + return; + else + MISSING_CASE(INTEL_GEN(i915)); +} + +void intel_gt_init_workarounds(struct drm_i915_private *i915) +{ + struct i915_wa_list *wal = &i915->gt_wa_list; + + wa_init_start(wal, "GT", "global"); + gt_init_workarounds(i915, wal); + wa_init_finish(wal); +} + +static enum forcewake_domains +wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal) +{ + enum forcewake_domains fw = 0; + struct i915_wa *wa; + unsigned int i; + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) + fw |= intel_uncore_forcewake_for_reg(uncore, + wa->reg, + FW_REG_READ | + FW_REG_WRITE); + + return fw; +} + +static bool +wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from) +{ + if ((cur ^ wa->val) & wa->read) { + DRM_ERROR("%s workaround lost on %s! 
(%x=%x/%x, expected %x, mask=%x)\n", + name, from, i915_mmio_reg_offset(wa->reg), + cur, cur & wa->read, + wa->val, wa->mask); + + return false; + } + + return true; +} + +static void +wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal) +{ + enum forcewake_domains fw; + unsigned long flags; + struct i915_wa *wa; + unsigned int i; + + if (!wal->count) + return; + + fw = wal_get_fw_for_rmw(uncore, wal); + + spin_lock_irqsave(&uncore->lock, flags); + intel_uncore_forcewake_get__locked(uncore, fw); + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val); + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + wa_verify(wa, + intel_uncore_read_fw(uncore, wa->reg), + wal->name, "application"); + } + + intel_uncore_forcewake_put__locked(uncore, fw); + spin_unlock_irqrestore(&uncore->lock, flags); +} + +void intel_gt_apply_workarounds(struct intel_gt *gt) +{ + wa_list_apply(gt->uncore, >->i915->gt_wa_list); +} + +static bool wa_list_verify(struct intel_uncore *uncore, + const struct i915_wa_list *wal, + const char *from) +{ + struct i915_wa *wa; + unsigned int i; + bool ok = true; + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) + ok &= wa_verify(wa, + intel_uncore_read(uncore, wa->reg), + wal->name, from); + + return ok; +} + +bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from) +{ + return wa_list_verify(gt->uncore, >->i915->gt_wa_list, from); +} + +static inline bool is_nonpriv_flags_valid(u32 flags) +{ + /* Check only valid flag bits are set */ + if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID) + return false; + + /* NB: Only 3 out of 4 enum values are valid for access field */ + if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) == + RING_FORCE_TO_NONPRIV_ACCESS_INVALID) + return false; + + return true; +} + +static void +whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags) +{ + struct i915_wa wa = { + .reg = reg + }; + + if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS)) + return; + + if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags))) + return; + + wa.reg.reg |= flags; + _wa_add(wal, &wa); +} + +static void +whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg) +{ + whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW); +} + +static void gen9_whitelist_build(struct i915_wa_list *w) +{ + /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ + whitelist_reg(w, GEN9_CTX_PREEMPT_REG); + + /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */ + whitelist_reg(w, GEN8_CS_CHICKEN1); + + /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */ + whitelist_reg(w, GEN8_HDC_CHICKEN1); + + /* WaSendPushConstantsFromMMIO:skl,bxt */ + whitelist_reg(w, COMMON_SLICE_CHICKEN2); +} + +static void skl_whitelist_build(struct intel_engine_cs *engine) +{ + struct i915_wa_list *w = &engine->whitelist; + + if (engine->class != RENDER_CLASS) + return; + + gen9_whitelist_build(w); + + /* WaDisableLSQCROPERFforOCL:skl */ + whitelist_reg(w, GEN8_L3SQCREG4); +} + +static void bxt_whitelist_build(struct intel_engine_cs *engine) +{ + if (engine->class != RENDER_CLASS) + return; + + gen9_whitelist_build(&engine->whitelist); +} + +static void kbl_whitelist_build(struct intel_engine_cs *engine) +{ + struct i915_wa_list *w = &engine->whitelist; + + if (engine->class != RENDER_CLASS) + return; + + gen9_whitelist_build(w); + + /* WaDisableLSQCROPERFforOCL:kbl */ + whitelist_reg(w, GEN8_L3SQCREG4); +} + +static void glk_whitelist_build(struct intel_engine_cs 
*engine) +{ + struct i915_wa_list *w = &engine->whitelist; + + if (engine->class != RENDER_CLASS) + return; + + gen9_whitelist_build(w); + + /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */ + whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); +} + +static void cfl_whitelist_build(struct intel_engine_cs *engine) +{ + struct i915_wa_list *w = &engine->whitelist; + + if (engine->class != RENDER_CLASS) + return; + + gen9_whitelist_build(w); + + /* + * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml + * + * This covers 4 register which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + RING_FORCE_TO_NONPRIV_ACCESS_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); +} + +static void cnl_whitelist_build(struct intel_engine_cs *engine) +{ + struct i915_wa_list *w = &engine->whitelist; + + if (engine->class != RENDER_CLASS) + return; + + /* WaEnablePreemptionGranularityControlByUMD:cnl */ + whitelist_reg(w, GEN8_CS_CHICKEN1); +} + +static void icl_whitelist_build(struct intel_engine_cs *engine) +{ + struct i915_wa_list *w = &engine->whitelist; + + switch (engine->class) { + case RENDER_CLASS: + /* WaAllowUMDToModifyHalfSliceChicken7:icl */ + whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7); + + /* WaAllowUMDToModifySamplerMode:icl */ + whitelist_reg(w, GEN10_SAMPLER_MODE); + + /* WaEnableStateCacheRedirectToCS:icl */ + whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); + + /* + * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl + * + * This covers 4 register which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + RING_FORCE_TO_NONPRIV_ACCESS_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); + break; + + case VIDEO_DECODE_CLASS: + /* hucStatusRegOffset */ + whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base), + RING_FORCE_TO_NONPRIV_ACCESS_RD); + /* hucUKernelHdrInfoRegOffset */ + whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base), + RING_FORCE_TO_NONPRIV_ACCESS_RD); + /* hucStatus2RegOffset */ + whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base), + RING_FORCE_TO_NONPRIV_ACCESS_RD); + break; + + default: + break; + } +} + +static void tgl_whitelist_build(struct intel_engine_cs *engine) +{ + struct i915_wa_list *w = &engine->whitelist; + + switch (engine->class) { + case RENDER_CLASS: + /* + * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl + * + * This covers 4 registers which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + RING_FORCE_TO_NONPRIV_ACCESS_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); + break; + default: + break; + } +} + +void intel_engine_init_whitelist(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *w = &engine->whitelist; + + wa_init_start(w, "whitelist", engine->name); + + if (IS_GEN(i915, 12)) + tgl_whitelist_build(engine); + else if (IS_GEN(i915, 11)) + icl_whitelist_build(engine); + else if (IS_CANNONLAKE(i915)) + cnl_whitelist_build(engine); + else if (IS_COFFEELAKE(i915)) + cfl_whitelist_build(engine); + else if (IS_GEMINILAKE(i915)) + glk_whitelist_build(engine); + else if (IS_KABYLAKE(i915)) + kbl_whitelist_build(engine); + else if (IS_BROXTON(i915)) + bxt_whitelist_build(engine); + else if (IS_SKYLAKE(i915)) + 
skl_whitelist_build(engine); + else if (INTEL_GEN(i915) <= 8) + return; + else + MISSING_CASE(INTEL_GEN(i915)); + + wa_init_finish(w); +} + +void intel_engine_apply_whitelist(struct intel_engine_cs *engine) +{ + const struct i915_wa_list *wal = &engine->whitelist; + struct intel_uncore *uncore = engine->uncore; + const u32 base = engine->mmio_base; + struct i915_wa *wa; + unsigned int i; + + if (!wal->count) + return; + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) + intel_uncore_write(uncore, + RING_FORCE_TO_NONPRIV(base, i), + i915_mmio_reg_offset(wa->reg)); + + /* And clear the rest just in case of garbage */ + for (; i < RING_MAX_NONPRIV_SLOTS; i++) + intel_uncore_write(uncore, + RING_FORCE_TO_NONPRIV(base, i), + i915_mmio_reg_offset(RING_NOPID(base))); +} + +static void +rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) +{ + struct drm_i915_private *i915 = engine->i915; + + if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) { + /* Wa_1606700617:tgl */ + wa_masked_en(wal, + GEN9_CS_DEBUG_MODE1, + FF_DOP_CLOCK_GATE_DISABLE); + + /* Wa_1607138336:tgl */ + wa_write_or(wal, + GEN9_CTX_PREEMPT_REG, + GEN12_DISABLE_POSH_BUSY_FF_DOP_CG); + + /* Wa_1607030317:tgl */ + /* Wa_1607186500:tgl */ + /* Wa_1607297627:tgl */ + wa_masked_en(wal, + GEN6_RC_SLEEP_PSMI_CONTROL, + GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE | + GEN8_RC_SEMA_IDLE_MSG_DISABLE); + } + + if (IS_GEN(i915, 11)) { + /* This is not an Wa. Enable for better image quality */ + wa_masked_en(wal, + _3D_CHICKEN3, + _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE); + + /* WaPipelineFlushCoherentLines:icl */ + wa_write_or(wal, + GEN8_L3SQCREG4, + GEN8_LQSC_FLUSH_COHERENT_LINES); + + /* + * Wa_1405543622:icl + * Formerly known as WaGAPZPriorityScheme + */ + wa_write_or(wal, + GEN8_GARBCNTL, + GEN11_ARBITRATION_PRIO_ORDER_MASK); + + /* + * Wa_1604223664:icl + * Formerly known as WaL3BankAddressHashing + */ + wa_write_masked_or(wal, + GEN8_GARBCNTL, + GEN11_HASH_CTRL_EXCL_MASK, + GEN11_HASH_CTRL_EXCL_BIT0); + wa_write_masked_or(wal, + GEN11_GLBLINVL, + GEN11_BANK_HASH_ADDR_EXCL_MASK, + GEN11_BANK_HASH_ADDR_EXCL_BIT0); + + /* + * Wa_1405733216:icl + * Formerly known as WaDisableCleanEvicts + */ + wa_write_or(wal, + GEN8_L3SQCREG4, + GEN11_LQSC_CLEAN_EVICT_DISABLE); + + /* WaForwardProgressSoftReset:icl */ + wa_write_or(wal, + GEN10_SCRATCH_LNCF2, + PMFLUSHDONE_LNICRSDROP | + PMFLUSH_GAPL3UNBLOCK | + PMFLUSHDONE_LNEBLK); + + /* Wa_1406609255:icl (pre-prod) */ + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) + wa_write_or(wal, + GEN7_SARCHKMD, + GEN7_DISABLE_DEMAND_PREFETCH); + + /* Wa_1606682166:icl */ + wa_write_or(wal, + GEN7_SARCHKMD, + GEN7_DISABLE_SAMPLER_PREFETCH); + + /* Wa_1409178092:icl */ + wa_write_masked_or(wal, + GEN11_SCRATCH2, + GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE, + 0); + } + + if (IS_GEN_RANGE(i915, 9, 11)) { + /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */ + wa_masked_en(wal, + GEN7_FF_SLICE_CS_CHICKEN1, + GEN9_FFSC_PERCTX_PREEMPT_CTRL); + } + + if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) { + /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */ + wa_write_or(wal, + GEN8_GARBCNTL, + GEN9_GAPS_TSV_CREDIT_DISABLE); + } + + if (IS_BROXTON(i915)) { + /* WaDisablePooledEuLoadBalancingFix:bxt */ + wa_masked_en(wal, + FF_SLICE_CS_CHICKEN2, + GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); + } + + if (IS_GEN(i915, 9)) { + /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ + wa_masked_en(wal, + GEN9_CSFE_CHICKEN1_RCS, + 
GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE); + + /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */ + wa_write_or(wal, + BDW_SCRATCH1, + GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); + + /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */ + if (IS_GEN9_LP(i915)) + wa_write_masked_or(wal, + GEN8_L3SQCREG1, + L3_PRIO_CREDITS_MASK, + L3_GENERAL_PRIO_CREDITS(62) | + L3_HIGH_PRIO_CREDITS(2)); + + /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */ + wa_write_or(wal, + GEN8_L3SQCREG4, + GEN8_LQSC_FLUSH_COHERENT_LINES); + } +} + +static void +xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) +{ + struct drm_i915_private *i915 = engine->i915; + + /* WaKBLVECSSemaphoreWaitPoll:kbl */ + if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) { + wa_write(wal, + RING_SEMA_WAIT_POLL(engine->mmio_base), + 1); + } +} + +static void +engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal) +{ + if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8)) + return; + + if (engine->class == RENDER_CLASS) + rcs_engine_wa_init(engine, wal); + else + xcs_engine_wa_init(engine, wal); +} + +void intel_engine_init_workarounds(struct intel_engine_cs *engine) +{ + struct i915_wa_list *wal = &engine->wa_list; + + if (INTEL_GEN(engine->i915) < 8) + return; + + wa_init_start(wal, "engine", engine->name); + engine_init_workarounds(engine, wal); + wa_init_finish(wal); +} + +void intel_engine_apply_workarounds(struct intel_engine_cs *engine) +{ + wa_list_apply(engine->uncore, &engine->wa_list); +} + +static struct i915_vma * +create_scratch(struct i915_address_space *vm, int count) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + unsigned int size; + int err; + + size = round_up(count * sizeof(u32), PAGE_SIZE); + obj = i915_gem_object_create_internal(vm->i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, + i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER); + if (err) + goto err_obj; + + return vma; + +err_obj: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static bool mcr_range(struct drm_i915_private *i915, u32 offset) +{ + /* + * Registers in this range are affected by the MCR selector + * which only controls CPU initiated MMIO. Routing does not + * work for CS access so we cannot verify them on this path. 
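A note on the WA_SET_BIT_MASKED()/wa_masked_en() helpers used heavily above: they target "masked" registers, where the upper 16 bits of the written value act as a per-bit write enable for the lower 16 bits, so individual chicken bits can be flipped without a read-modify-write cycle. A toy userspace model of that convention (masked_bit_enable(), masked_reg_write() and friends are illustrative names, not the kernel macros):

/* Toy model of i915-style "masked" registers: high half = write-enable mask. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static inline uint32_t masked_bit_enable(uint32_t bit)
{
	return (bit << 16) | bit;        /* enable writing the bit, set it */
}

static inline uint32_t masked_bit_disable(uint32_t bit)
{
	return bit << 16;                /* enable writing the bit, clear it */
}

/* What the hardware conceptually does with a write to a masked register. */
static uint32_t masked_reg_write(uint32_t current, uint32_t value)
{
	uint32_t mask = value >> 16;

	return (current & ~mask) | (value & mask & 0xffff);
}

int main(void)
{
	uint32_t reg = 0;

	reg = masked_reg_write(reg, masked_bit_enable(1u << 3));
	assert(reg == (1u << 3));        /* only bit 3 touched */

	reg = masked_reg_write(reg, masked_bit_disable(1u << 3));
	assert(reg == 0);

	printf("masked write model ok\n");
	return 0;
}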
+ */ + if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff)) + return true; + + return false; +} + +static int +wa_list_srm(struct i915_request *rq, + const struct i915_wa_list *wal, + struct i915_vma *vma) +{ + struct drm_i915_private *i915 = rq->i915; + unsigned int i, count = 0; + const struct i915_wa *wa; + u32 srm, *cs; + + srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; + if (INTEL_GEN(i915) >= 8) + srm++; + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg))) + count++; + } + + cs = intel_ring_begin(rq, 4 * count); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + u32 offset = i915_mmio_reg_offset(wa->reg); + + if (mcr_range(i915, offset)) + continue; + + *cs++ = srm; + *cs++ = offset; + *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i; + *cs++ = 0; + } + intel_ring_advance(rq, cs); + + return 0; +} + +static int engine_wa_list_verify(struct intel_context *ce, + const struct i915_wa_list * const wal, + const char *from) +{ + const struct i915_wa *wa; + struct i915_request *rq; + struct i915_vma *vma; + unsigned int i; + u32 *results; + int err; + + if (!wal->count) + return 0; + + vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_vma; + } + + err = wa_list_srm(rq, wal, vma); + if (err) + goto err_vma; + + i915_request_add(rq); + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = -ETIME; + goto err_vma; + } + + results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(results)) { + err = PTR_ERR(results); + goto err_vma; + } + + err = 0; + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg))) + continue; + + if (!wa_verify(wa, results[i], wal->name, from)) + err = -ENXIO; + } + + i915_gem_object_unpin_map(vma->obj); + +err_vma: + i915_vma_unpin(vma); + i915_vma_put(vma); + return err; +} + +int intel_engine_verify_workarounds(struct intel_engine_cs *engine, + const char *from) +{ + return engine_wa_list_verify(engine->kernel_context, + &engine->wa_list, + from); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_workarounds.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h new file mode 100644 index 000000000000..8c9c769c2204 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#ifndef _INTEL_WORKAROUNDS_H_ +#define _INTEL_WORKAROUNDS_H_ + +#include <linux/slab.h> + +#include "intel_workarounds_types.h" + +struct drm_i915_private; +struct i915_request; +struct intel_engine_cs; +struct intel_gt; + +static inline void intel_wa_list_free(struct i915_wa_list *wal) +{ + kfree(wal->list); + memset(wal, 0, sizeof(*wal)); +} + +void intel_engine_init_ctx_wa(struct intel_engine_cs *engine); +int intel_engine_emit_ctx_wa(struct i915_request *rq); + +void intel_gt_init_workarounds(struct drm_i915_private *i915); +void intel_gt_apply_workarounds(struct intel_gt *gt); +bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from); + +void intel_engine_init_whitelist(struct intel_engine_cs *engine); +void intel_engine_apply_whitelist(struct intel_engine_cs *engine); + +void intel_engine_init_workarounds(struct intel_engine_cs *engine); +void 
intel_engine_apply_workarounds(struct intel_engine_cs *engine); +int intel_engine_verify_workarounds(struct intel_engine_cs *engine, + const char *from); + +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h new file mode 100644 index 000000000000..e27ab1b710b3 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#ifndef __INTEL_WORKAROUNDS_TYPES_H__ +#define __INTEL_WORKAROUNDS_TYPES_H__ + +#include <linux/types.h> + +#include "i915_reg.h" + +struct i915_wa { + i915_reg_t reg; + u32 mask; + u32 val; + u32 read; +}; + +struct i915_wa_list { + const char *name; + const char *engine_name; + struct i915_wa *list; + unsigned int count; + unsigned int wa_count; +}; + +#endif /* __INTEL_WORKAROUNDS_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c new file mode 100644 index 000000000000..83f549d203a0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -0,0 +1,338 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
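The struct i915_wa introduced in intel_workarounds_types.h (reg, mask, val, read) is exactly what wa_verify() earlier in intel_workarounds.c checks: a workaround counts as lost when any bit covered by the read-back mask differs between the current register value and the expected one. A stand-alone sketch of that comparison, with i915_wa_model and wa_still_applied() as illustrative names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct i915_wa_model {           /* mirrors reg/mask/val/read from the header */
	uint32_t reg;
	uint32_t mask;
	uint32_t val;
	uint32_t read;
};

static bool wa_still_applied(const struct i915_wa_model *wa, uint32_t cur)
{
	/* Bits outside wa->read are don't-care (write-only or volatile). */
	return ((cur ^ wa->val) & wa->read) == 0;
}

int main(void)
{
	struct i915_wa_model wa = { .reg = 0x7300, .mask = 0x10, .val = 0x10, .read = 0x10 };

	printf("applied: %d\n", wa_still_applied(&wa, 0x10)); /* 1 */
	printf("applied: %d\n", wa_still_applied(&wa, 0x00)); /* 0: workaround lost */
	return 0;
}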
+ * + */ + +#include "gem/i915_gem_context.h" +#include "gt/intel_ring.h" + +#include "i915_drv.h" +#include "intel_context.h" +#include "intel_engine_pm.h" +#include "intel_engine_pool.h" + +#include "mock_engine.h" +#include "selftests/mock_request.h" + +static void mock_timeline_pin(struct intel_timeline *tl) +{ + atomic_inc(&tl->pin_count); +} + +static void mock_timeline_unpin(struct intel_timeline *tl) +{ + GEM_BUG_ON(!atomic_read(&tl->pin_count)); + atomic_dec(&tl->pin_count); +} + +static struct intel_ring *mock_ring(struct intel_engine_cs *engine) +{ + const unsigned long sz = PAGE_SIZE / 2; + struct intel_ring *ring; + + ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); + if (!ring) + return NULL; + + kref_init(&ring->ref); + ring->size = sz; + ring->effective_size = sz; + ring->vaddr = (void *)(ring + 1); + atomic_set(&ring->pin_count, 1); + + intel_ring_update_space(ring); + + return ring; +} + +static struct i915_request *first_request(struct mock_engine *engine) +{ + return list_first_entry_or_null(&engine->hw_queue, + struct i915_request, + mock.link); +} + +static void advance(struct i915_request *request) +{ + list_del_init(&request->mock.link); + i915_request_mark_complete(request); + GEM_BUG_ON(!i915_request_completed(request)); + + intel_engine_queue_breadcrumbs(request->engine); +} + +static void hw_delay_complete(struct timer_list *t) +{ + struct mock_engine *engine = from_timer(engine, t, hw_delay); + struct i915_request *request; + unsigned long flags; + + spin_lock_irqsave(&engine->hw_lock, flags); + + /* Timer fired, first request is complete */ + request = first_request(engine); + if (request) + advance(request); + + /* + * Also immediately signal any subsequent 0-delay requests, but + * requeue the timer for the next delayed request. 
+ */ + while ((request = first_request(engine))) { + if (request->mock.delay) { + mod_timer(&engine->hw_delay, + jiffies + request->mock.delay); + break; + } + + advance(request); + } + + spin_unlock_irqrestore(&engine->hw_lock, flags); +} + +static void mock_context_unpin(struct intel_context *ce) +{ +} + +static void mock_context_destroy(struct kref *ref) +{ + struct intel_context *ce = container_of(ref, typeof(*ce), ref); + + GEM_BUG_ON(intel_context_is_pinned(ce)); + + if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { + kfree(ce->ring); + mock_timeline_unpin(ce->timeline); + } + + intel_context_fini(ce); + intel_context_free(ce); +} + +static int mock_context_alloc(struct intel_context *ce) +{ + ce->ring = mock_ring(ce->engine); + if (!ce->ring) + return -ENOMEM; + + GEM_BUG_ON(ce->timeline); + ce->timeline = intel_timeline_create(ce->engine->gt, NULL); + if (IS_ERR(ce->timeline)) { + kfree(ce->engine); + return PTR_ERR(ce->timeline); + } + + mock_timeline_pin(ce->timeline); + + return 0; +} + +static int mock_context_pin(struct intel_context *ce) +{ + return intel_context_active_acquire(ce); +} + +static const struct intel_context_ops mock_context_ops = { + .alloc = mock_context_alloc, + + .pin = mock_context_pin, + .unpin = mock_context_unpin, + + .enter = intel_context_enter_engine, + .exit = intel_context_exit_engine, + + .destroy = mock_context_destroy, +}; + +static int mock_request_alloc(struct i915_request *request) +{ + INIT_LIST_HEAD(&request->mock.link); + request->mock.delay = 0; + + return 0; +} + +static int mock_emit_flush(struct i915_request *request, + unsigned int flags) +{ + return 0; +} + +static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs) +{ + return cs; +} + +static void mock_submit_request(struct i915_request *request) +{ + struct mock_engine *engine = + container_of(request->engine, typeof(*engine), base); + unsigned long flags; + + i915_request_submit(request); + + spin_lock_irqsave(&engine->hw_lock, flags); + list_add_tail(&request->mock.link, &engine->hw_queue); + if (list_is_first(&request->mock.link, &engine->hw_queue)) { + if (request->mock.delay) + mod_timer(&engine->hw_delay, + jiffies + request->mock.delay); + else + advance(request); + } + spin_unlock_irqrestore(&engine->hw_lock, flags); +} + +static void mock_reset_prepare(struct intel_engine_cs *engine) +{ +} + +static void mock_reset(struct intel_engine_cs *engine, bool stalled) +{ + GEM_BUG_ON(stalled); +} + +static void mock_reset_finish(struct intel_engine_cs *engine) +{ +} + +static void mock_cancel_requests(struct intel_engine_cs *engine) +{ + struct i915_request *request; + unsigned long flags; + + spin_lock_irqsave(&engine->active.lock, flags); + + /* Mark all submitted requests as skipped. 
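mock_submit_request() and hw_delay_complete() above fake hardware completion: a zero-delay request completes immediately, a delayed request arms a timer, and when the timer fires the queue is drained until the next delayed request re-arms it. Stripped of kernel timers and locking, the draining step reduces to roughly the following sketch (fake_request and drain_queue() are made-up names, and the "timer" is simulated by hand):

#include <stdio.h>
#include <stdlib.h>

struct fake_request {
	unsigned int delay;          /* 0 = complete as soon as it is reached */
	struct fake_request *next;
};

/* Drain completed requests; return the delay to re-arm the "timer" with. */
static unsigned int drain_queue(struct fake_request **queue)
{
	while (*queue) {
		struct fake_request *rq = *queue;

		if (rq->delay)
			return rq->delay;    /* stop: next completion is delayed */

		*queue = rq->next;           /* zero delay: retire immediately */
		printf("completed request %p\n", (void *)rq);
		free(rq);
	}

	return 0;                            /* queue empty, no timer needed */
}

int main(void)
{
	struct fake_request *queue = NULL, **tail = &queue;
	unsigned int delays[] = { 0, 0, 3, 0 };

	for (unsigned int i = 0; i < 4; i++) {
		struct fake_request *rq = calloc(1, sizeof(*rq));

		if (!rq)
			return 1;
		rq->delay = delays[i];
		*tail = rq;
		tail = &rq->next;
	}

	printf("re-arm for %u\n", drain_queue(&queue));  /* completes 2, stops at delay 3 */
	queue->delay = 0;                                /* pretend the timer fired */
	printf("re-arm for %u\n", drain_queue(&queue));  /* completes the rest */
	return 0;
}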
*/ + list_for_each_entry(request, &engine->active.requests, sched.link) { + if (!i915_request_signaled(request)) + dma_fence_set_error(&request->fence, -EIO); + + i915_request_mark_complete(request); + } + + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, + const char *name, + int id) +{ + struct mock_engine *engine; + + GEM_BUG_ON(id >= I915_NUM_ENGINES); + GEM_BUG_ON(!i915->gt.uncore); + + engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL); + if (!engine) + return NULL; + + /* minimal engine setup for requests */ + engine->base.i915 = i915; + engine->base.gt = &i915->gt; + engine->base.uncore = i915->gt.uncore; + snprintf(engine->base.name, sizeof(engine->base.name), "%s", name); + engine->base.id = id; + engine->base.mask = BIT(id); + engine->base.legacy_idx = INVALID_ENGINE; + engine->base.instance = id; + engine->base.status_page.addr = (void *)(engine + 1); + + engine->base.cops = &mock_context_ops; + engine->base.request_alloc = mock_request_alloc; + engine->base.emit_flush = mock_emit_flush; + engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb; + engine->base.submit_request = mock_submit_request; + + engine->base.reset.prepare = mock_reset_prepare; + engine->base.reset.reset = mock_reset; + engine->base.reset.finish = mock_reset_finish; + engine->base.cancel_requests = mock_cancel_requests; + + i915->gt.engine[id] = &engine->base; + i915->gt.engine_class[0][id] = &engine->base; + + /* fake hw queue */ + spin_lock_init(&engine->hw_lock); + timer_setup(&engine->hw_delay, hw_delay_complete, 0); + INIT_LIST_HEAD(&engine->hw_queue); + + intel_engine_add_user(&engine->base); + + return &engine->base; +} + +int mock_engine_init(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + + intel_engine_init_active(engine, ENGINE_MOCK); + intel_engine_init_breadcrumbs(engine); + intel_engine_init_execlists(engine); + intel_engine_init__pm(engine); + intel_engine_pool_init(&engine->pool); + + ce = create_kernel_context(engine); + if (IS_ERR(ce)) + goto err_breadcrumbs; + + engine->kernel_context = ce; + return 0; + +err_breadcrumbs: + intel_engine_fini_breadcrumbs(engine); + return -ENOMEM; +} + +void mock_engine_flush(struct intel_engine_cs *engine) +{ + struct mock_engine *mock = + container_of(engine, typeof(*mock), base); + struct i915_request *request, *rn; + + del_timer_sync(&mock->hw_delay); + + spin_lock_irq(&mock->hw_lock); + list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link) + advance(request); + spin_unlock_irq(&mock->hw_lock); +} + +void mock_engine_reset(struct intel_engine_cs *engine) +{ +} + +void mock_engine_free(struct intel_engine_cs *engine) +{ + struct mock_engine *mock = + container_of(engine, typeof(*mock), base); + + GEM_BUG_ON(timer_pending(&mock->hw_delay)); + + intel_context_unpin(engine->kernel_context); + intel_context_put(engine->kernel_context); + + intel_engine_fini_breadcrumbs(engine); + + kfree(engine); +} diff --git a/drivers/gpu/drm/i915/gt/mock_engine.h b/drivers/gpu/drm/i915/gt/mock_engine.h new file mode 100644 index 000000000000..3f9b698c49d2 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/mock_engine.h @@ -0,0 +1,51 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, 
distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __MOCK_ENGINE_H__ +#define __MOCK_ENGINE_H__ + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/timer.h> + +#include "gt/intel_engine.h" + +struct mock_engine { + struct intel_engine_cs base; + + spinlock_t hw_lock; + struct list_head hw_queue; + struct timer_list hw_delay; +}; + +struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, + const char *name, + int id); +int mock_engine_init(struct intel_engine_cs *engine); + +void mock_engine_flush(struct intel_engine_cs *engine); +void mock_engine_reset(struct intel_engine_cs *engine); +void mock_engine_free(struct intel_engine_cs *engine); + +#endif /* !__MOCK_ENGINE_H__ */ diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c new file mode 100644 index 000000000000..bc720defc6b8 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_context.c @@ -0,0 +1,443 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_selftest.h" +#include "intel_engine_pm.h" +#include "intel_gt.h" + +#include "gem/selftests/mock_context.h" +#include "selftests/igt_flush_test.h" +#include "selftests/mock_drm.h" + +static int request_sync(struct i915_request *rq) +{ + struct intel_timeline *tl = i915_request_timeline(rq); + long timeout; + int err = 0; + + intel_timeline_get(tl); + i915_request_get(rq); + + /* Opencode i915_request_add() so we can keep the timeline locked. 
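struct mock_engine embeds the real struct intel_engine_cs as its base member, and mock_engine.c recovers the wrapper from a base pointer with container_of() (see mock_submit_request() and mock_engine_flush()). The pattern is plain offset arithmetic; a self-contained illustration with stand-in types (base_engine, mock_engine_model):

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for intel_engine_cs / mock_engine; only the layout matters. */
struct base_engine { const char *name; };

struct mock_engine_model {
	struct base_engine base;     /* embedded base object */
	int hw_queue_len;            /* mock-only state */
};

#define container_of_model(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void submit(struct base_engine *engine)
{
	/* Recover the containing mock engine from the base pointer. */
	struct mock_engine_model *mock =
		container_of_model(engine, struct mock_engine_model, base);

	mock->hw_queue_len++;
	printf("%s: queued, depth now %d\n", engine->name, mock->hw_queue_len);
}

int main(void)
{
	struct mock_engine_model mock = { .base = { .name = "mock0" } };

	submit(&mock.base);
	submit(&mock.base);
	return 0;
}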
*/ + __i915_request_commit(rq); + __i915_request_queue(rq, NULL); + + timeout = i915_request_wait(rq, 0, HZ / 10); + if (timeout < 0) + err = timeout; + else + i915_request_retire_upto(rq); + + lockdep_unpin_lock(&tl->mutex, rq->cookie); + mutex_unlock(&tl->mutex); + + i915_request_put(rq); + intel_timeline_put(tl); + + return err; +} + +static int context_sync(struct intel_context *ce) +{ + struct intel_timeline *tl = ce->timeline; + int err = 0; + + mutex_lock(&tl->mutex); + do { + struct dma_fence *fence; + long timeout; + + fence = i915_active_fence_get(&tl->last_request); + if (!fence) + break; + + timeout = dma_fence_wait_timeout(fence, false, HZ / 10); + if (timeout < 0) + err = timeout; + else + i915_request_retire_upto(to_request(fence)); + + dma_fence_put(fence); + } while (!err); + mutex_unlock(&tl->mutex); + + return err; +} + +static int __live_context_size(struct intel_engine_cs *engine, + struct i915_gem_context *fixme) +{ + struct intel_context *ce; + struct i915_request *rq; + void *vaddr; + int err; + + ce = intel_context_create(fixme, engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + err = intel_context_pin(ce); + if (err) + goto err; + + vaddr = i915_gem_object_pin_map(ce->state->obj, + i915_coherent_map_type(engine->i915)); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + intel_context_unpin(ce); + goto err; + } + + /* + * Note that execlists also applies a redzone which it checks on + * context unpin when debugging. We are using the same location + * and same poison value so that our checks overlap. Despite the + * redundancy, we want to keep this little selftest so that we + * get coverage of any and all submission backends, and we can + * always extend this test to ensure we trick the HW into a + * compromising position wrt to the various sections that need + * to be written into the context state. + * + * TLDR; this overlaps with the execlists redzone. + */ + vaddr += engine->context_size - I915_GTT_PAGE_SIZE; + memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE); + + rq = intel_context_create_request(ce); + intel_context_unpin(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_unpin; + } + + err = request_sync(rq); + if (err) + goto err_unpin; + + /* Force the context switch */ + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_unpin; + } + err = request_sync(rq); + if (err) + goto err_unpin; + + if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) { + pr_err("%s context overwrote trailing red-zone!", engine->name); + err = -EINVAL; + } + +err_unpin: + i915_gem_object_unpin_map(ce->state->obj); +err: + intel_context_put(ce); + return err; +} + +static int live_context_size(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *fixme; + enum intel_engine_id id; + int err = 0; + + /* + * Check that our context sizes are correct by seeing if the + * HW tries to write past the end of one. + */ + + fixme = kernel_context(gt->i915); + if (IS_ERR(fixme)) + return PTR_ERR(fixme); + + for_each_engine(engine, gt, id) { + struct { + struct drm_i915_gem_object *state; + void *pinned; + } saved; + + if (!engine->context_size) + continue; + + intel_engine_pm_get(engine); + + /* + * Hide the old default state -- we lie about the context size + * and get confused when the default state is smaller than + * expected. For our do nothing request, inheriting the + * active state is sufficient, we are only checking that we + * don't use more than we planned. 
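__live_context_size() above validates context-image sizing by poisoning the final page of the image and checking that the poison survives a context switch (memchr_inv() returns non-NULL if any byte changed). The same poison-and-scan idea in stand-alone form, with a hand-rolled scan since memchr_inv() is kernel-internal and POISON_BYTE standing in for POISON_INUSE:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REDZONE_SIZE 4096
#define POISON_BYTE  0x5a        /* stand-in for the kernel's POISON_INUSE */

/* Return index of the first byte that no longer holds the poison, or -1. */
static long redzone_check(const uint8_t *zone, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if (zone[i] != POISON_BYTE)
			return (long)i;
	return -1;
}

int main(void)
{
	uint8_t zone[REDZONE_SIZE];

	memset(zone, POISON_BYTE, sizeof(zone));   /* poison the trailing region */

	/* ... run the workload that must not touch the redzone ... */
	zone[100] = 0;                             /* simulate an overrun */

	long hit = redzone_check(zone, sizeof(zone));
	if (hit >= 0)
		printf("redzone overwritten at byte %ld\n", hit);
	else
		printf("redzone intact\n");
	return 0;
}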
+ */ + saved.state = fetch_and_zero(&engine->default_state); + saved.pinned = fetch_and_zero(&engine->pinned_default_state); + + /* Overlaps with the execlists redzone */ + engine->context_size += I915_GTT_PAGE_SIZE; + + err = __live_context_size(engine, fixme); + + engine->context_size -= I915_GTT_PAGE_SIZE; + + engine->pinned_default_state = saved.pinned; + engine->default_state = saved.state; + + intel_engine_pm_put(engine); + + if (err) + break; + } + + kernel_context_close(fixme); + return err; +} + +static int __live_active_context(struct intel_engine_cs *engine, + struct i915_gem_context *fixme) +{ + struct intel_context *ce; + int pass; + int err; + + /* + * We keep active contexts alive until after a subsequent context + * switch as the final write from the context-save will be after + * we retire the final request. We track when we unpin the context, + * under the presumption that the final pin is from the last request, + * and instead of immediately unpinning the context, we add a task + * to unpin the context from the next idle-barrier. + * + * This test makes sure that the context is kept alive until a + * subsequent idle-barrier (emitted when the engine wakeref hits 0 + * with no more outstanding requests). + */ + + if (intel_engine_pm_is_awake(engine)) { + pr_err("%s is awake before starting %s!\n", + engine->name, __func__); + return -EINVAL; + } + + ce = intel_context_create(fixme, engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + for (pass = 0; pass <= 2; pass++) { + struct i915_request *rq; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err; + } + + err = request_sync(rq); + if (err) + goto err; + + /* Context will be kept active until after an idle-barrier. */ + if (i915_active_is_idle(&ce->active)) { + pr_err("context is not active; expected idle-barrier (%s pass %d)\n", + engine->name, pass); + err = -EINVAL; + goto err; + } + + if (!intel_engine_pm_is_awake(engine)) { + pr_err("%s is asleep before idle-barrier\n", + engine->name); + err = -EINVAL; + goto err; + } + } + + /* Now make sure our idle-barriers are flushed */ + err = context_sync(engine->kernel_context); + if (err) + goto err; + + if (!i915_active_is_idle(&ce->active)) { + pr_err("context is still active!"); + err = -EINVAL; + } + + if (intel_engine_pm_is_awake(engine)) { + struct drm_printer p = drm_debug_printer(__func__); + + intel_engine_dump(engine, &p, + "%s is still awake after idle-barriers\n", + engine->name); + GEM_TRACE_DUMP(); + + err = -EINVAL; + goto err; + } + +err: + intel_context_put(ce); + return err; +} + +static int live_active_context(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *fixme; + enum intel_engine_id id; + struct drm_file *file; + int err = 0; + + file = mock_file(gt->i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + fixme = live_context(gt->i915, file); + if (IS_ERR(fixme)) { + err = PTR_ERR(fixme); + goto out_file; + } + + for_each_engine(engine, gt, id) { + err = __live_active_context(engine, fixme); + if (err) + break; + + err = igt_flush_test(gt->i915); + if (err) + break; + } + +out_file: + mock_file_free(gt->i915, file); + return err; +} + +static int __remote_sync(struct intel_context *ce, struct intel_context *remote) +{ + struct i915_request *rq; + int err; + + err = intel_context_pin(remote); + if (err) + return err; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto unpin; + } + + err = 
intel_context_prepare_remote_request(remote, rq); + if (err) { + i915_request_add(rq); + goto unpin; + } + + err = request_sync(rq); + +unpin: + intel_context_unpin(remote); + return err; +} + +static int __live_remote_context(struct intel_engine_cs *engine, + struct i915_gem_context *fixme) +{ + struct intel_context *local, *remote; + int pass; + int err; + + /* + * Check that our idle barriers do not interfere with normal + * activity tracking. In particular, check that operating + * on the context image remotely (intel_context_prepare_remote_request), + * which inserts foreign fences into intel_context.active, does not + * clobber the idle-barrier. + */ + + remote = intel_context_create(fixme, engine); + if (IS_ERR(remote)) + return PTR_ERR(remote); + + local = intel_context_create(fixme, engine); + if (IS_ERR(local)) { + err = PTR_ERR(local); + goto err_remote; + } + + for (pass = 0; pass <= 2; pass++) { + err = __remote_sync(local, remote); + if (err) + break; + + err = __remote_sync(engine->kernel_context, remote); + if (err) + break; + + if (i915_active_is_idle(&remote->active)) { + pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n", + engine->name, pass); + err = -EINVAL; + break; + } + } + + intel_context_put(local); +err_remote: + intel_context_put(remote); + return err; +} + +static int live_remote_context(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *fixme; + enum intel_engine_id id; + struct drm_file *file; + int err = 0; + + file = mock_file(gt->i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + fixme = live_context(gt->i915, file); + if (IS_ERR(fixme)) { + err = PTR_ERR(fixme); + goto out_file; + } + + for_each_engine(engine, gt, id) { + err = __live_remote_context(engine, fixme); + if (err) + break; + + err = igt_flush_test(gt->i915); + if (err) + break; + } + +out_file: + mock_file_free(gt->i915, file); + return err; +} + +int intel_context_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_context_size), + SUBTEST(live_active_context), + SUBTEST(live_remote_context), + }; + struct intel_gt *gt = &i915->gt; + + if (intel_gt_is_wedged(gt)) + return 0; + + return intel_gt_live_subtests(tests, gt); +} diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.c b/drivers/gpu/drm/i915/gt/selftest_engine.c new file mode 100644 index 000000000000..f65b118e261d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_engine.c @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright © 2018 Intel Corporation + */ + +#include "i915_selftest.h" +#include "selftest_engine.h" + +int intel_engine_live_selftests(struct drm_i915_private *i915) +{ + static int (* const tests[])(struct intel_gt *) = { + live_engine_pm_selftests, + NULL, + }; + struct intel_gt *gt = &i915->gt; + typeof(*tests) *fn; + + for (fn = tests; *fn; fn++) { + int err; + + err = (*fn)(gt); + if (err) + return err; + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.h b/drivers/gpu/drm/i915/gt/selftest_engine.h new file mode 100644 index 000000000000..ab32d09ec5a1 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_engine.h @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef SELFTEST_ENGINE_H +#define SELFTEST_ENGINE_H + +struct intel_gt; + +int live_engine_pm_selftests(struct intel_gt *gt); + +#endif diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c 
b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c new file mode 100644 index 000000000000..3880f07c29b8 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright © 2018 Intel Corporation + */ + +#include "../i915_selftest.h" + +static int intel_mmio_bases_check(void *arg) +{ + int i, j; + + for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { + const struct engine_info *info = &intel_engines[i]; + u8 prev = U8_MAX; + + for (j = 0; j < MAX_MMIO_BASES; j++) { + u8 gen = info->mmio_bases[j].gen; + u32 base = info->mmio_bases[j].base; + + if (gen >= prev) { + pr_err("%s(%s, class:%d, instance:%d): mmio base for gen %x is before the one for gen %x\n", + __func__, + intel_engine_class_repr(info->class), + info->class, info->instance, + prev, gen); + return -EINVAL; + } + + if (gen == 0) + break; + + if (!base) { + pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for gen %x at entry %u\n", + __func__, + intel_engine_class_repr(info->class), + info->class, info->instance, + base, gen, j); + return -EINVAL; + } + + prev = gen; + } + + pr_debug("%s: min gen supported for %s%d is %d\n", + __func__, + intel_engine_class_repr(info->class), + info->instance, + prev); + } + + return 0; +} + +int intel_engine_cs_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(intel_mmio_bases_check), + }; + + return i915_subtests(tests, NULL); +} diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c new file mode 100644 index 000000000000..e864406bd2d9 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -0,0 +1,350 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include <linux/sort.h> + +#include "i915_drv.h" + +#include "intel_gt_requests.h" +#include "i915_selftest.h" + +struct pulse { + struct i915_active active; + struct kref kref; +}; + +static int pulse_active(struct i915_active *active) +{ + kref_get(&container_of(active, struct pulse, active)->kref); + return 0; +} + +static void pulse_free(struct kref *kref) +{ + kfree(container_of(kref, struct pulse, kref)); +} + +static void pulse_put(struct pulse *p) +{ + kref_put(&p->kref, pulse_free); +} + +static void pulse_retire(struct i915_active *active) +{ + pulse_put(container_of(active, struct pulse, active)); +} + +static struct pulse *pulse_create(void) +{ + struct pulse *p; + + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return p; + + kref_init(&p->kref); + i915_active_init(&p->active, pulse_active, pulse_retire); + + return p; +} + +static void pulse_unlock_wait(struct pulse *p) +{ + mutex_lock(&p->active.mutex); + mutex_unlock(&p->active.mutex); + flush_work(&p->active.work); +} + +static int __live_idle_pulse(struct intel_engine_cs *engine, + int (*fn)(struct intel_engine_cs *cs)) +{ + struct pulse *p; + int err; + + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); + + p = pulse_create(); + if (!p) + return -ENOMEM; + + err = i915_active_acquire(&p->active); + if (err) + goto out; + + err = i915_active_acquire_preallocate_barrier(&p->active, engine); + if (err) { + i915_active_release(&p->active); + goto out; + } + + i915_active_acquire_barrier(&p->active); + i915_active_release(&p->active); + + GEM_BUG_ON(i915_active_is_idle(&p->active)); + GEM_BUG_ON(llist_empty(&engine->barrier_tasks)); + + err = fn(engine); + if (err) + goto out; + + GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); + + if 
(intel_gt_retire_requests_timeout(engine->gt, HZ / 5)) { + err = -ETIME; + goto out; + } + + GEM_BUG_ON(READ_ONCE(engine->serial) != engine->wakeref_serial); + + pulse_unlock_wait(p); /* synchronize with the retirement callback */ + + if (!i915_active_is_idle(&p->active)) { + struct drm_printer m = drm_err_printer("pulse"); + + pr_err("%s: heartbeat pulse did not flush idle tasks\n", + engine->name); + i915_active_print(&p->active, &m); + + err = -EINVAL; + goto out; + } + +out: + pulse_put(p); + return err; +} + +static int live_idle_flush(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that we can flush the idle barriers */ + + for_each_engine(engine, gt, id) { + intel_engine_pm_get(engine); + err = __live_idle_pulse(engine, intel_engine_flush_barriers); + intel_engine_pm_put(engine); + if (err) + break; + } + + return err; +} + +static int live_idle_pulse(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that heartbeat pulses flush the idle barriers */ + + for_each_engine(engine, gt, id) { + intel_engine_pm_get(engine); + err = __live_idle_pulse(engine, intel_engine_pulse); + intel_engine_pm_put(engine); + if (err && err != -ENODEV) + break; + + err = 0; + } + + return err; +} + +static int cmp_u32(const void *_a, const void *_b) +{ + const u32 *a = _a, *b = _b; + + return *a - *b; +} + +static int __live_heartbeat_fast(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + struct i915_request *rq; + ktime_t t0, t1; + u32 times[5]; + int err; + int i; + + ce = intel_context_create(engine->kernel_context->gem_context, + engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + intel_engine_pm_get(engine); + + err = intel_engine_set_heartbeat(engine, 1); + if (err) + goto err_pm; + + for (i = 0; i < ARRAY_SIZE(times); i++) { + /* Manufacture a tick */ + do { + while (READ_ONCE(engine->heartbeat.systole)) + flush_delayed_work(&engine->heartbeat.work); + + engine->serial++; /* quick, pretend we are not idle! */ + flush_delayed_work(&engine->heartbeat.work); + if (!delayed_work_pending(&engine->heartbeat.work)) { + pr_err("%s: heartbeat did not start\n", + engine->name); + err = -EINVAL; + goto err_pm; + } + + rcu_read_lock(); + rq = READ_ONCE(engine->heartbeat.systole); + if (rq) + rq = i915_request_get_rcu(rq); + rcu_read_unlock(); + } while (!rq); + + t0 = ktime_get(); + while (rq == READ_ONCE(engine->heartbeat.systole)) + yield(); /* work is on the local cpu! */ + t1 = ktime_get(); + + i915_request_put(rq); + times[i] = ktime_us_delta(t1, t0); + } + + sort(times, ARRAY_SIZE(times), sizeof(times[0]), cmp_u32, NULL); + + pr_info("%s: Heartbeat delay: %uus [%u, %u]\n", + engine->name, + times[ARRAY_SIZE(times) / 2], + times[0], + times[ARRAY_SIZE(times) - 1]); + + /* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */ + if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) { + pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n", + engine->name, + times[ARRAY_SIZE(times) / 2], + jiffies_to_usecs(6)); + err = -EINVAL; + } + + intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL); +err_pm: + intel_engine_pm_put(engine); + intel_context_put(ce); + return err; +} + +static int live_heartbeat_fast(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that the heartbeat ticks at the desired rate. 
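__live_heartbeat_fast() above times a handful of heartbeat ticks, sorts the samples and reports the median together with min/max before comparing the median against its budget. The same measure/sort/median pattern in stand-alone form, using libc qsort() in place of the kernel's sort() and hard-coded sample values:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int cmp_u32(const void *a, const void *b)
{
	uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;

	return (x > y) - (x < y);   /* avoids the overflow plain subtraction risks */
}

int main(void)
{
	uint32_t times_us[5] = { 180, 90, 400, 120, 95 };
	const size_t n = sizeof(times_us) / sizeof(times_us[0]);

	qsort(times_us, n, sizeof(times_us[0]), cmp_u32);

	printf("heartbeat delay: %uus [%u, %u]\n",
	       times_us[n / 2], times_us[0], times_us[n - 1]);

	if (times_us[n / 2] > 250)
		printf("median above budget\n");
	return 0;
}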
*/ + if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL) + return 0; + + for_each_engine(engine, gt, id) { + err = __live_heartbeat_fast(engine); + if (err) + break; + } + + return err; +} + +static int __live_heartbeat_off(struct intel_engine_cs *engine) +{ + int err; + + intel_engine_pm_get(engine); + + engine->serial++; + flush_delayed_work(&engine->heartbeat.work); + if (!delayed_work_pending(&engine->heartbeat.work)) { + pr_err("%s: heartbeat not running\n", + engine->name); + err = -EINVAL; + goto err_pm; + } + + err = intel_engine_set_heartbeat(engine, 0); + if (err) + goto err_pm; + + engine->serial++; + flush_delayed_work(&engine->heartbeat.work); + if (delayed_work_pending(&engine->heartbeat.work)) { + pr_err("%s: heartbeat still running\n", + engine->name); + err = -EINVAL; + goto err_beat; + } + + if (READ_ONCE(engine->heartbeat.systole)) { + pr_err("%s: heartbeat still allocated\n", + engine->name); + err = -EINVAL; + goto err_beat; + } + +err_beat: + intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL); +err_pm: + intel_engine_pm_put(engine); + return err; +} + +static int live_heartbeat_off(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that we can turn off heartbeat and not interrupt VIP */ + if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL) + return 0; + + for_each_engine(engine, gt, id) { + if (!intel_engine_has_preemption(engine)) + continue; + + err = __live_heartbeat_off(engine); + if (err) + break; + } + + return err; +} + +int intel_heartbeat_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_idle_flush), + SUBTEST(live_idle_pulse), + SUBTEST(live_heartbeat_fast), + SUBTEST(live_heartbeat_off), + }; + int saved_hangcheck; + int err; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + saved_hangcheck = i915_modparams.enable_hangcheck; + i915_modparams.enable_hangcheck = INT_MAX; + + err = intel_gt_live_subtests(tests, &i915->gt); + + i915_modparams.enable_hangcheck = saved_hangcheck; + return err; +} diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c new file mode 100644 index 000000000000..20b9c83f43ad --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright © 2018 Intel Corporation + */ + +#include "i915_selftest.h" +#include "selftest_engine.h" +#include "selftests/igt_atomic.h" + +static int live_engine_pm(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* + * Check we can call intel_engine_pm_put from any context. No + * failures are reported directly, but if we mess up lockdep should + * tell us. + */ + if (intel_gt_pm_wait_for_idle(gt)) { + pr_err("Unable to flush GT pm before test\n"); + return -EBUSY; + } + + GEM_BUG_ON(intel_gt_pm_is_awake(gt)); + for_each_engine(engine, gt, id) { + const typeof(*igt_atomic_phases) *p; + + for (p = igt_atomic_phases; p->name; p++) { + /* + * Acquisition is always synchronous, except if we + * know that the engine is already awake, in which + * case we should use intel_engine_pm_get_if_awake() + * to atomically grab the wakeref. + * + * In practice, + * intel_engine_pm_get(); + * intel_engine_pm_put(); + * occurs in one thread, while simultaneously + * intel_engine_pm_get_if_awake(); + * intel_engine_pm_put(); + * occurs from atomic context in another. 
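The live_engine_pm test exercises both ways of taking an engine wakeref: the normal, possibly sleeping get, and intel_engine_pm_get_if_awake(), which only succeeds when a reference is already held and is therefore usable from atomic context. The underlying "increment only if non-zero" idea, modelled here with C11 atomics (get_if_active() and put() are illustrative names, not the kernel helpers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if at least one is already held (cf. kref_get_unless_zero). */
static bool get_if_active(atomic_uint *count)
{
	unsigned int old = atomic_load(count);

	do {
		if (old == 0)
			return false;    /* asleep: would need the sleeping path */
	} while (!atomic_compare_exchange_weak(count, &old, old + 1));

	return true;
}

static void put(atomic_uint *count)
{
	atomic_fetch_sub(count, 1);
}

int main(void)
{
	atomic_uint wakeref = 0;

	printf("get_if_active on idle engine: %d\n", get_if_active(&wakeref));  /* 0 */

	atomic_fetch_add(&wakeref, 1);       /* the synchronous get has run */
	printf("get_if_active on awake engine: %d\n", get_if_active(&wakeref)); /* 1 */

	put(&wakeref);
	put(&wakeref);
	return 0;
}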
+ */ + GEM_BUG_ON(intel_engine_pm_is_awake(engine)); + intel_engine_pm_get(engine); + + p->critical_section_begin(); + if (!intel_engine_pm_get_if_awake(engine)) + pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n", + engine->name, p->name); + else + intel_engine_pm_put(engine); + intel_engine_pm_put(engine); + p->critical_section_end(); + + /* engine wakeref is sync (instant) */ + if (intel_engine_pm_is_awake(engine)) { + pr_err("%s is still awake after flushing pm\n", + engine->name); + return -EINVAL; + } + + /* gt wakeref is async (deferred to workqueue) */ + if (intel_gt_pm_wait_for_idle(gt)) { + pr_err("GT failed to idle\n"); + return -EINVAL; + } + } + } + + return 0; +} + +int live_engine_pm_selftests(struct intel_gt *gt) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_engine_pm), + }; + + return intel_gt_live_subtests(tests, gt); +} diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c new file mode 100644 index 000000000000..d1752f15702a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -0,0 +1,60 @@ + +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "selftest_llc.h" + +static int live_gt_resume(void *arg) +{ + struct intel_gt *gt = arg; + IGT_TIMEOUT(end_time); + int err; + + /* Do several suspend/resume cycles to check we don't explode! */ + do { + intel_gt_suspend_prepare(gt); + intel_gt_suspend_late(gt); + + if (gt->rc6.enabled) { + pr_err("rc6 still enabled after suspend!\n"); + intel_gt_set_wedged_on_init(gt); + err = -EINVAL; + break; + } + + err = intel_gt_resume(gt); + if (err) + break; + + if (gt->rc6.supported && !gt->rc6.enabled) { + pr_err("rc6 not enabled upon resume!\n"); + intel_gt_set_wedged_on_init(gt); + err = -EINVAL; + break; + } + + err = st_llc_verify(>->llc); + if (err) { + pr_err("llc state not restored upon resume!\n"); + intel_gt_set_wedged_on_init(gt); + break; + } + } while (!__igt_timeout(end_time, NULL)); + + return err; +} + +int intel_gt_pm_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_gt_resume), + }; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return intel_gt_live_subtests(tests, &i915->gt); +} diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c new file mode 100644 index 000000000000..85e9ccf5c304 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -0,0 +1,1712 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include <linux/kthread.h> + +#include "gem/i915_gem_context.h" +#include "gt/intel_gt.h" +#include "intel_engine_pm.h" + +#include "i915_selftest.h" +#include "selftests/i915_random.h" +#include "selftests/igt_flush_test.h" +#include "selftests/igt_reset.h" +#include "selftests/igt_atomic.h" + +#include "selftests/mock_drm.h" + +#include "gem/selftests/mock_context.h" +#include "gem/selftests/igt_gem_utils.h" + +#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */ + +struct hang { + struct intel_gt *gt; + struct drm_i915_gem_object *hws; + struct drm_i915_gem_object *obj; + struct i915_gem_context *ctx; + u32 *seqno; + u32 *batch; +}; + +static int hang_init(struct hang *h, struct intel_gt *gt) +{ + void *vaddr; + int err; + + memset(h, 0, sizeof(*h)); + h->gt = gt; + + h->ctx = kernel_context(gt->i915); + if (IS_ERR(h->ctx)) + return PTR_ERR(h->ctx); + + GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx)); + + h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(h->hws)) { + err = PTR_ERR(h->hws); + goto err_ctx; + } + + h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(h->obj)) { + err = PTR_ERR(h->obj); + goto err_hws; + } + + i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC); + vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_obj; + } + h->seqno = memset(vaddr, 0xff, PAGE_SIZE); + + vaddr = i915_gem_object_pin_map(h->obj, + i915_coherent_map_type(gt->i915)); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_unpin_hws; + } + h->batch = vaddr; + + return 0; + +err_unpin_hws: + i915_gem_object_unpin_map(h->hws); +err_obj: + i915_gem_object_put(h->obj); +err_hws: + i915_gem_object_put(h->hws); +err_ctx: + kernel_context_close(h->ctx); + return err; +} + +static u64 hws_address(const struct i915_vma *hws, + const struct i915_request *rq) +{ + return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context); +} + +static int move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags) +{ + int err; + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, + flags & EXEC_OBJECT_WRITE); + if (err == 0) + err = i915_vma_move_to_active(vma, rq, flags); + i915_vma_unlock(vma); + + return err; +} + +static struct i915_request * +hang_create_request(struct hang *h, struct intel_engine_cs *engine) +{ + struct intel_gt *gt = h->gt; + struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx); + struct drm_i915_gem_object *obj; + struct i915_request *rq = NULL; + struct i915_vma *hws, *vma; + unsigned int flags; + void *vaddr; + u32 *batch; + int err; + + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) { + i915_vm_put(vm); + return ERR_CAST(obj); + } + + vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915)); + if (IS_ERR(vaddr)) { + i915_gem_object_put(obj); + i915_vm_put(vm); + return ERR_CAST(vaddr); + } + + i915_gem_object_unpin_map(h->obj); + i915_gem_object_put(h->obj); + + h->obj = obj; + h->batch = vaddr; + + vma = i915_vma_instance(h->obj, vm, NULL); + if (IS_ERR(vma)) { + i915_vm_put(vm); + return ERR_CAST(vma); + } + + hws = i915_vma_instance(h->hws, vm, 
NULL); + if (IS_ERR(hws)) { + i915_vm_put(vm); + return ERR_CAST(hws); + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) { + i915_vm_put(vm); + return ERR_PTR(err); + } + + err = i915_vma_pin(hws, 0, 0, PIN_USER); + if (err) + goto unpin_vma; + + rq = igt_request_alloc(h->ctx, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto unpin_hws; + } + + err = move_to_active(vma, rq, 0); + if (err) + goto cancel_rq; + + err = move_to_active(hws, rq, 0); + if (err) + goto cancel_rq; + + batch = h->batch; + if (INTEL_GEN(gt->i915) >= 8) { + *batch++ = MI_STORE_DWORD_IMM_GEN4; + *batch++ = lower_32_bits(hws_address(hws, rq)); + *batch++ = upper_32_bits(hws_address(hws, rq)); + *batch++ = rq->fence.seqno; + *batch++ = MI_ARB_CHECK; + + memset(batch, 0, 1024); + batch += 1024 / sizeof(*batch); + + *batch++ = MI_ARB_CHECK; + *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; + *batch++ = lower_32_bits(vma->node.start); + *batch++ = upper_32_bits(vma->node.start); + } else if (INTEL_GEN(gt->i915) >= 6) { + *batch++ = MI_STORE_DWORD_IMM_GEN4; + *batch++ = 0; + *batch++ = lower_32_bits(hws_address(hws, rq)); + *batch++ = rq->fence.seqno; + *batch++ = MI_ARB_CHECK; + + memset(batch, 0, 1024); + batch += 1024 / sizeof(*batch); + + *batch++ = MI_ARB_CHECK; + *batch++ = MI_BATCH_BUFFER_START | 1 << 8; + *batch++ = lower_32_bits(vma->node.start); + } else if (INTEL_GEN(gt->i915) >= 4) { + *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *batch++ = 0; + *batch++ = lower_32_bits(hws_address(hws, rq)); + *batch++ = rq->fence.seqno; + *batch++ = MI_ARB_CHECK; + + memset(batch, 0, 1024); + batch += 1024 / sizeof(*batch); + + *batch++ = MI_ARB_CHECK; + *batch++ = MI_BATCH_BUFFER_START | 2 << 6; + *batch++ = lower_32_bits(vma->node.start); + } else { + *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *batch++ = lower_32_bits(hws_address(hws, rq)); + *batch++ = rq->fence.seqno; + *batch++ = MI_ARB_CHECK; + + memset(batch, 0, 1024); + batch += 1024 / sizeof(*batch); + + *batch++ = MI_ARB_CHECK; + *batch++ = MI_BATCH_BUFFER_START | 2 << 6; + *batch++ = lower_32_bits(vma->node.start); + } + *batch++ = MI_BATCH_BUFFER_END; /* not reached */ + intel_gt_chipset_flush(engine->gt); + + if (rq->engine->emit_init_breadcrumb) { + err = rq->engine->emit_init_breadcrumb(rq); + if (err) + goto cancel_rq; + } + + flags = 0; + if (INTEL_GEN(gt->i915) <= 5) + flags |= I915_DISPATCH_SECURE; + + err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); + +cancel_rq: + if (err) { + i915_request_skip(rq, err); + i915_request_add(rq); + } +unpin_hws: + i915_vma_unpin(hws); +unpin_vma: + i915_vma_unpin(vma); + i915_vm_put(vm); + return err ? 
ERR_PTR(err) : rq; +} + +static u32 hws_seqno(const struct hang *h, const struct i915_request *rq) +{ + return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]); +} + +static void hang_fini(struct hang *h) +{ + *h->batch = MI_BATCH_BUFFER_END; + intel_gt_chipset_flush(h->gt); + + i915_gem_object_unpin_map(h->obj); + i915_gem_object_put(h->obj); + + i915_gem_object_unpin_map(h->hws); + i915_gem_object_put(h->hws); + + kernel_context_close(h->ctx); + + igt_flush_test(h->gt->i915); +} + +static bool wait_until_running(struct hang *h, struct i915_request *rq) +{ + return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq), + rq->fence.seqno), + 10) && + wait_for(i915_seqno_passed(hws_seqno(h, rq), + rq->fence.seqno), + 1000)); +} + +static int igt_hang_sanitycheck(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_request *rq; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct hang h; + int err; + + /* Basic check that we can execute our hanging batch */ + + err = hang_init(&h, gt); + if (err) + return err; + + for_each_engine(engine, gt, id) { + struct intel_wedge_me w; + long timeout; + + if (!intel_engine_can_store_dword(engine)) + continue; + + rq = hang_create_request(&h, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + pr_err("Failed to create request for %s, err=%d\n", + engine->name, err); + goto fini; + } + + i915_request_get(rq); + + *h.batch = MI_BATCH_BUFFER_END; + intel_gt_chipset_flush(engine->gt); + + i915_request_add(rq); + + timeout = 0; + intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */) + timeout = i915_request_wait(rq, 0, + MAX_SCHEDULE_TIMEOUT); + if (intel_gt_is_wedged(gt)) + timeout = -EIO; + + i915_request_put(rq); + + if (timeout < 0) { + err = timeout; + pr_err("Wait for request failed on %s, err=%d\n", + engine->name, err); + goto fini; + } + } + +fini: + hang_fini(&h); + return err; +} + +static bool wait_for_idle(struct intel_engine_cs *engine) +{ + return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0; +} + +static int igt_reset_nop(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + unsigned int reset_count, count; + enum intel_engine_id id; + struct drm_file *file; + IGT_TIMEOUT(end_time); + int err = 0; + + /* Check that we can reset during non-user portions of requests */ + + file = mock_file(gt->i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = live_context(gt->i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + + i915_gem_context_clear_bannable(ctx); + reset_count = i915_reset_count(global); + count = 0; + do { + for_each_engine(engine, gt, id) { + int i; + + for (i = 0; i < 16; i++) { + struct i915_request *rq; + + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_add(rq); + } + } + + igt_global_reset_lock(gt); + intel_gt_reset(gt, ALL_ENGINES, NULL); + igt_global_reset_unlock(gt); + + if (intel_gt_is_wedged(gt)) { + err = -EIO; + break; + } + + if (i915_reset_count(global) != reset_count + ++count) { + pr_err("Full GPU reset not recorded!\n"); + err = -EINVAL; + break; + } + + err = igt_flush_test(gt->i915); + if (err) + break; + } while (time_before(jiffies, end_time)); + pr_info("%s: %d resets\n", __func__, count); + + err = igt_flush_test(gt->i915); +out: + mock_file_free(gt->i915, file); + if (intel_gt_is_wedged(gt)) + err = -EIO; + return err; +} + +static int igt_reset_nop_engine(void *arg) +{ 
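+ /*
+ * Unlike igt_reset_nop() above, which performs full GT resets, this
+ * test uses intel_engine_reset() under the I915_RESET_ENGINE flag
+ * and verifies that only the per-engine reset count advances while
+ * the global reset count stays untouched.
+ */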
+ struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + enum intel_engine_id id; + struct drm_file *file; + int err = 0; + + /* Check that we can engine-reset during non-user portions */ + + if (!intel_has_reset_engine(gt)) + return 0; + + file = mock_file(gt->i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = live_context(gt->i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + + i915_gem_context_clear_bannable(ctx); + for_each_engine(engine, gt, id) { + unsigned int reset_count, reset_engine_count; + unsigned int count; + IGT_TIMEOUT(end_time); + + reset_count = i915_reset_count(global); + reset_engine_count = i915_reset_engine_count(global, engine); + count = 0; + + set_bit(I915_RESET_ENGINE + id, >->reset.flags); + do { + int i; + + if (!wait_for_idle(engine)) { + pr_err("%s failed to idle before reset\n", + engine->name); + err = -EIO; + break; + } + + for (i = 0; i < 16; i++) { + struct i915_request *rq; + + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_add(rq); + } + err = intel_engine_reset(engine, NULL); + if (err) { + pr_err("i915_reset_engine failed\n"); + break; + } + + if (i915_reset_count(global) != reset_count) { + pr_err("Full GPU reset recorded! (engine reset expected)\n"); + err = -EINVAL; + break; + } + + if (i915_reset_engine_count(global, engine) != + reset_engine_count + ++count) { + pr_err("%s engine reset not recorded!\n", + engine->name); + err = -EINVAL; + break; + } + } while (time_before(jiffies, end_time)); + clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + pr_info("%s(%s): %d resets\n", __func__, engine->name, count); + + if (err) + break; + + err = igt_flush_test(gt->i915); + if (err) + break; + } + + err = igt_flush_test(gt->i915); +out: + mock_file_free(gt->i915, file); + if (intel_gt_is_wedged(gt)) + err = -EIO; + return err; +} + +static int __igt_reset_engine(struct intel_gt *gt, bool active) +{ + struct i915_gpu_error *global = >->i915->gpu_error; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct hang h; + int err = 0; + + /* Check that we can issue an engine reset on an idle engine (no-op) */ + + if (!intel_has_reset_engine(gt)) + return 0; + + if (active) { + err = hang_init(&h, gt); + if (err) + return err; + } + + for_each_engine(engine, gt, id) { + unsigned int reset_count, reset_engine_count; + IGT_TIMEOUT(end_time); + + if (active && !intel_engine_can_store_dword(engine)) + continue; + + if (!wait_for_idle(engine)) { + pr_err("%s failed to idle before reset\n", + engine->name); + err = -EIO; + break; + } + + reset_count = i915_reset_count(global); + reset_engine_count = i915_reset_engine_count(global, engine); + + intel_engine_pm_get(engine); + set_bit(I915_RESET_ENGINE + id, >->reset.flags); + do { + if (active) { + struct i915_request *rq; + + rq = hang_create_request(&h, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (!wait_until_running(&h, rq)) { + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); + + pr_err("%s: Failed to start request %llx, at %x\n", + __func__, rq->fence.seqno, hws_seqno(&h, rq)); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + i915_request_put(rq); + err = -EIO; + break; + } + + i915_request_put(rq); + } + + err = intel_engine_reset(engine, NULL); + if (err) { + pr_err("i915_reset_engine failed\n"); + break; + } + + if 
(i915_reset_count(global) != reset_count) { + pr_err("Full GPU reset recorded! (engine reset expected)\n"); + err = -EINVAL; + break; + } + + if (i915_reset_engine_count(global, engine) != + ++reset_engine_count) { + pr_err("%s engine reset not recorded!\n", + engine->name); + err = -EINVAL; + break; + } + } while (time_before(jiffies, end_time)); + clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + intel_engine_pm_put(engine); + + if (err) + break; + + err = igt_flush_test(gt->i915); + if (err) + break; + } + + if (intel_gt_is_wedged(gt)) + err = -EIO; + + if (active) + hang_fini(&h); + + return err; +} + +static int igt_reset_idle_engine(void *arg) +{ + return __igt_reset_engine(arg, false); +} + +static int igt_reset_active_engine(void *arg) +{ + return __igt_reset_engine(arg, true); +} + +struct active_engine { + struct task_struct *task; + struct intel_engine_cs *engine; + unsigned long resets; + unsigned int flags; +}; + +#define TEST_ACTIVE BIT(0) +#define TEST_OTHERS BIT(1) +#define TEST_SELF BIT(2) +#define TEST_PRIORITY BIT(3) + +static int active_request_put(struct i915_request *rq) +{ + int err = 0; + + if (!rq) + return 0; + + if (i915_request_wait(rq, 0, 5 * HZ) < 0) { + GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n", + rq->engine->name, + rq->fence.context, + rq->fence.seqno); + GEM_TRACE_DUMP(); + + intel_gt_set_wedged(rq->engine->gt); + err = -EIO; + } + + i915_request_put(rq); + + return err; +} + +static int active_engine(void *data) +{ + I915_RND_STATE(prng); + struct active_engine *arg = data; + struct intel_engine_cs *engine = arg->engine; + struct i915_request *rq[8] = {}; + struct i915_gem_context *ctx[ARRAY_SIZE(rq)]; + struct drm_file *file; + unsigned long count = 0; + int err = 0; + + file = mock_file(engine->i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + for (count = 0; count < ARRAY_SIZE(ctx); count++) { + ctx[count] = live_context(engine->i915, file); + if (IS_ERR(ctx[count])) { + err = PTR_ERR(ctx[count]); + while (--count) + i915_gem_context_put(ctx[count]); + goto err_file; + } + } + + while (!kthread_should_stop()) { + unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1); + struct i915_request *old = rq[idx]; + struct i915_request *new; + + new = igt_request_alloc(ctx[idx], engine); + if (IS_ERR(new)) { + err = PTR_ERR(new); + break; + } + + if (arg->flags & TEST_PRIORITY) + ctx[idx]->sched.priority = + i915_prandom_u32_max_state(512, &prng); + + rq[idx] = i915_request_get(new); + i915_request_add(new); + + err = active_request_put(old); + if (err) + break; + + cond_resched(); + } + + for (count = 0; count < ARRAY_SIZE(rq); count++) { + int err__ = active_request_put(rq[count]); + + /* Keep the first error */ + if (!err) + err = err__; + } + +err_file: + mock_file_free(engine->i915, file); + return err; +} + +static int __igt_reset_engines(struct intel_gt *gt, + const char *test_name, + unsigned int flags) +{ + struct i915_gpu_error *global = >->i915->gpu_error; + struct intel_engine_cs *engine, *other; + enum intel_engine_id id, tmp; + struct hang h; + int err = 0; + + /* Check that issuing a reset on one engine does not interfere + * with any other engine. 
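+ * Depending on the flags, active_engine() kthreads keep the other
+ * engines (and optionally the target engine itself) busy with a
+ * stream of requests while the target is repeatedly reset. On
+ * completion we verify that innocent engines of a different class
+ * recorded no resets and that no full GPU reset was triggered.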
+ */ + + if (!intel_has_reset_engine(gt)) + return 0; + + if (flags & TEST_ACTIVE) { + err = hang_init(&h, gt); + if (err) + return err; + + if (flags & TEST_PRIORITY) + h.ctx->sched.priority = 1024; + } + + for_each_engine(engine, gt, id) { + struct active_engine threads[I915_NUM_ENGINES] = {}; + unsigned long device = i915_reset_count(global); + unsigned long count = 0, reported; + IGT_TIMEOUT(end_time); + + if (flags & TEST_ACTIVE && + !intel_engine_can_store_dword(engine)) + continue; + + if (!wait_for_idle(engine)) { + pr_err("i915_reset_engine(%s:%s): failed to idle before reset\n", + engine->name, test_name); + err = -EIO; + break; + } + + memset(threads, 0, sizeof(threads)); + for_each_engine(other, gt, tmp) { + struct task_struct *tsk; + + threads[tmp].resets = + i915_reset_engine_count(global, other); + + if (!(flags & TEST_OTHERS)) + continue; + + if (other == engine && !(flags & TEST_SELF)) + continue; + + threads[tmp].engine = other; + threads[tmp].flags = flags; + + tsk = kthread_run(active_engine, &threads[tmp], + "igt/%s", other->name); + if (IS_ERR(tsk)) { + err = PTR_ERR(tsk); + goto unwind; + } + + threads[tmp].task = tsk; + get_task_struct(tsk); + } + + yield(); /* start all threads before we begin */ + + intel_engine_pm_get(engine); + set_bit(I915_RESET_ENGINE + id, >->reset.flags); + do { + struct i915_request *rq = NULL; + + if (flags & TEST_ACTIVE) { + rq = hang_create_request(&h, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (!wait_until_running(&h, rq)) { + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); + + pr_err("%s: Failed to start request %llx, at %x\n", + __func__, rq->fence.seqno, hws_seqno(&h, rq)); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + i915_request_put(rq); + err = -EIO; + break; + } + } + + err = intel_engine_reset(engine, NULL); + if (err) { + pr_err("i915_reset_engine(%s:%s): failed, err=%d\n", + engine->name, test_name, err); + break; + } + + count++; + + if (rq) { + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(gt->i915->drm.dev); + + pr_err("i915_reset_engine(%s:%s):" + " failed to complete request after reset\n", + engine->name, test_name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + i915_request_put(rq); + + GEM_TRACE_DUMP(); + intel_gt_set_wedged(gt); + err = -EIO; + break; + } + + i915_request_put(rq); + } + + if (!(flags & TEST_SELF) && !wait_for_idle(engine)) { + struct drm_printer p = + drm_info_printer(gt->i915->drm.dev); + + pr_err("i915_reset_engine(%s:%s):" + " failed to idle after reset\n", + engine->name, test_name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + err = -EIO; + break; + } + } while (time_before(jiffies, end_time)); + clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + intel_engine_pm_put(engine); + pr_info("i915_reset_engine(%s:%s): %lu resets\n", + engine->name, test_name, count); + + reported = i915_reset_engine_count(global, engine); + reported -= threads[engine->id].resets; + if (reported != count) { + pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n", + engine->name, test_name, count, reported); + if (!err) + err = -EINVAL; + } + +unwind: + for_each_engine(other, gt, tmp) { + int ret; + + if (!threads[tmp].task) + continue; + + ret = kthread_stop(threads[tmp].task); + if (ret) { + pr_err("kthread for other engine %s failed, err=%d\n", + other->name, ret); + if (!err) + err = ret; + } + 
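+ /* Drop the reference taken with get_task_struct() when the thread was started */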
put_task_struct(threads[tmp].task); + + if (other->uabi_class != engine->uabi_class && + threads[tmp].resets != + i915_reset_engine_count(global, other)) { + pr_err("Innocent engine %s was reset (count=%ld)\n", + other->name, + i915_reset_engine_count(global, other) - + threads[tmp].resets); + if (!err) + err = -EINVAL; + } + } + + if (device != i915_reset_count(global)) { + pr_err("Global reset (count=%ld)!\n", + i915_reset_count(global) - device); + if (!err) + err = -EINVAL; + } + + if (err) + break; + + err = igt_flush_test(gt->i915); + if (err) + break; + } + + if (intel_gt_is_wedged(gt)) + err = -EIO; + + if (flags & TEST_ACTIVE) + hang_fini(&h); + + return err; +} + +static int igt_reset_engines(void *arg) +{ + static const struct { + const char *name; + unsigned int flags; + } phases[] = { + { "idle", 0 }, + { "active", TEST_ACTIVE }, + { "others-idle", TEST_OTHERS }, + { "others-active", TEST_OTHERS | TEST_ACTIVE }, + { + "others-priority", + TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY + }, + { + "self-priority", + TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY | TEST_SELF, + }, + { } + }; + struct intel_gt *gt = arg; + typeof(*phases) *p; + int err; + + for (p = phases; p->name; p++) { + if (p->flags & TEST_PRIORITY) { + if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) + continue; + } + + err = __igt_reset_engines(arg, p->name, p->flags); + if (err) + return err; + } + + return 0; +} + +static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask) +{ + u32 count = i915_reset_count(>->i915->gpu_error); + + intel_gt_reset(gt, mask, NULL); + + return count; +} + +static int igt_reset_wait(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; + struct intel_engine_cs *engine = gt->engine[RCS0]; + struct i915_request *rq; + unsigned int reset_count; + struct hang h; + long timeout; + int err; + + if (!engine || !intel_engine_can_store_dword(engine)) + return 0; + + /* Check that we detect a stuck waiter and issue a reset */ + + igt_global_reset_lock(gt); + + err = hang_init(&h, gt); + if (err) + goto unlock; + + rq = hang_create_request(&h, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto fini; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (!wait_until_running(&h, rq)) { + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); + + pr_err("%s: Failed to start request %llx, at %x\n", + __func__, rq->fence.seqno, hws_seqno(&h, rq)); + intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); + + intel_gt_set_wedged(gt); + + err = -EIO; + goto out_rq; + } + + reset_count = fake_hangcheck(gt, ALL_ENGINES); + + timeout = i915_request_wait(rq, 0, 10); + if (timeout < 0) { + pr_err("i915_request_wait failed on a stuck request: err=%ld\n", + timeout); + err = timeout; + goto out_rq; + } + + if (i915_reset_count(global) == reset_count) { + pr_err("No GPU reset recorded!\n"); + err = -EINVAL; + goto out_rq; + } + +out_rq: + i915_request_put(rq); +fini: + hang_fini(&h); +unlock: + igt_global_reset_unlock(gt); + + if (intel_gt_is_wedged(gt)) + return -EIO; + + return err; +} + +struct evict_vma { + struct completion completion; + struct i915_vma *vma; +}; + +static int evict_vma(void *data) +{ + struct evict_vma *arg = data; + struct i915_address_space *vm = arg->vma->vm; + struct drm_mm_node evict = arg->vma->node; + int err; + + complete(&arg->completion); + + mutex_lock(&vm->mutex); + err = i915_gem_evict_for_node(vm, &evict, 0); + mutex_unlock(&vm->mutex); + + return err; +} + +static int 
evict_fence(void *data) +{ + struct evict_vma *arg = data; + int err; + + complete(&arg->completion); + + /* Mark the fence register as dirty to force the mmio update. */ + err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512); + if (err) { + pr_err("Invalid Y-tiling settings; err:%d\n", err); + return err; + } + + err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE); + if (err) { + pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err); + return err; + } + + err = i915_vma_pin_fence(arg->vma); + i915_vma_unpin(arg->vma); + if (err) { + pr_err("Unable to pin Y-tiled fence; err:%d\n", err); + return err; + } + + i915_vma_unpin_fence(arg->vma); + + return 0; +} + +static int __igt_reset_evict_vma(struct intel_gt *gt, + struct i915_address_space *vm, + int (*fn)(void *), + unsigned int flags) +{ + struct intel_engine_cs *engine = gt->engine[RCS0]; + struct drm_i915_gem_object *obj; + struct task_struct *tsk = NULL; + struct i915_request *rq; + struct evict_vma arg; + struct hang h; + unsigned int pin_flags; + int err; + + if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE) + return 0; + + if (!engine || !intel_engine_can_store_dword(engine)) + return 0; + + /* Check that we can recover an unbind stuck on a hanging request */ + + err = hang_init(&h, gt); + if (err) + return err; + + obj = i915_gem_object_create_internal(gt->i915, SZ_1M); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto fini; + } + + if (flags & EXEC_OBJECT_NEEDS_FENCE) { + err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512); + if (err) { + pr_err("Invalid X-tiling settings; err:%d\n", err); + goto out_obj; + } + } + + arg.vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(arg.vma)) { + err = PTR_ERR(arg.vma); + goto out_obj; + } + + rq = hang_create_request(&h, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_obj; + } + + pin_flags = i915_vma_is_ggtt(arg.vma) ? 
PIN_GLOBAL : PIN_USER; + + if (flags & EXEC_OBJECT_NEEDS_FENCE) + pin_flags |= PIN_MAPPABLE; + + err = i915_vma_pin(arg.vma, 0, 0, pin_flags); + if (err) { + i915_request_add(rq); + goto out_obj; + } + + if (flags & EXEC_OBJECT_NEEDS_FENCE) { + err = i915_vma_pin_fence(arg.vma); + if (err) { + pr_err("Unable to pin X-tiled fence; err:%d\n", err); + i915_vma_unpin(arg.vma); + i915_request_add(rq); + goto out_obj; + } + } + + i915_vma_lock(arg.vma); + err = i915_request_await_object(rq, arg.vma->obj, + flags & EXEC_OBJECT_WRITE); + if (err == 0) + err = i915_vma_move_to_active(arg.vma, rq, flags); + i915_vma_unlock(arg.vma); + + if (flags & EXEC_OBJECT_NEEDS_FENCE) + i915_vma_unpin_fence(arg.vma); + i915_vma_unpin(arg.vma); + + i915_request_get(rq); + i915_request_add(rq); + if (err) + goto out_rq; + + if (!wait_until_running(&h, rq)) { + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); + + pr_err("%s: Failed to start request %llx, at %x\n", + __func__, rq->fence.seqno, hws_seqno(&h, rq)); + intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); + + intel_gt_set_wedged(gt); + goto out_reset; + } + + init_completion(&arg.completion); + + tsk = kthread_run(fn, &arg, "igt/evict_vma"); + if (IS_ERR(tsk)) { + err = PTR_ERR(tsk); + tsk = NULL; + goto out_reset; + } + get_task_struct(tsk); + + wait_for_completion(&arg.completion); + + if (wait_for(!list_empty(&rq->fence.cb_list), 10)) { + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); + + pr_err("igt/evict_vma kthread did not wait\n"); + intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); + + intel_gt_set_wedged(gt); + goto out_reset; + } + +out_reset: + igt_global_reset_lock(gt); + fake_hangcheck(gt, rq->engine->mask); + igt_global_reset_unlock(gt); + + if (tsk) { + struct intel_wedge_me w; + + /* The reset, even indirectly, should take less than 10ms. 
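+ * If the evict kthread is still blocked on the hung request after
+ * that, intel_wedge_on_timeout() wedges the device so that
+ * kthread_stop() can complete; the wedged device then fails the
+ * test with -EIO.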
*/ + intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */) + err = kthread_stop(tsk); + + put_task_struct(tsk); + } + +out_rq: + i915_request_put(rq); +out_obj: + i915_gem_object_put(obj); +fini: + hang_fini(&h); + if (intel_gt_is_wedged(gt)) + return -EIO; + + return err; +} + +static int igt_reset_evict_ggtt(void *arg) +{ + struct intel_gt *gt = arg; + + return __igt_reset_evict_vma(gt, >->ggtt->vm, + evict_vma, EXEC_OBJECT_WRITE); +} + +static int igt_reset_evict_ppgtt(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_context *ctx; + struct i915_address_space *vm; + struct drm_file *file; + int err; + + file = mock_file(gt->i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = live_context(gt->i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + + err = 0; + vm = i915_gem_context_get_vm_rcu(ctx); + if (!i915_is_ggtt(vm)) { + /* aliasing == global gtt locking, covered above */ + err = __igt_reset_evict_vma(gt, vm, + evict_vma, EXEC_OBJECT_WRITE); + } + i915_vm_put(vm); + +out: + mock_file_free(gt->i915, file); + return err; +} + +static int igt_reset_evict_fence(void *arg) +{ + struct intel_gt *gt = arg; + + return __igt_reset_evict_vma(gt, >->ggtt->vm, + evict_fence, EXEC_OBJECT_NEEDS_FENCE); +} + +static int wait_for_others(struct intel_gt *gt, + struct intel_engine_cs *exclude) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) { + if (engine == exclude) + continue; + + if (!wait_for_idle(engine)) + return -EIO; + } + + return 0; +} + +static int igt_reset_queue(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct hang h; + int err; + + /* Check that we replay pending requests following a hang */ + + igt_global_reset_lock(gt); + + err = hang_init(&h, gt); + if (err) + goto unlock; + + for_each_engine(engine, gt, id) { + struct i915_request *prev; + IGT_TIMEOUT(end_time); + unsigned int count; + + if (!intel_engine_can_store_dword(engine)) + continue; + + prev = hang_create_request(&h, engine); + if (IS_ERR(prev)) { + err = PTR_ERR(prev); + goto fini; + } + + i915_request_get(prev); + i915_request_add(prev); + + count = 0; + do { + struct i915_request *rq; + unsigned int reset_count; + + rq = hang_create_request(&h, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto fini; + } + + i915_request_get(rq); + i915_request_add(rq); + + /* + * XXX We don't handle resetting the kernel context + * very well. If we trigger a device reset twice in + * quick succession while the kernel context is + * executing, we may end up skipping the breadcrumb. + * This is really only a problem for the selftest as + * normally there is a large interlude between resets + * (hangcheck), or we focus on resetting just one + * engine and so avoid repeatedly resetting innocents. 
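+ * Hence the wait_for_others() call below: before triggering the
+ * next device reset we make sure every other engine has idled, so
+ * that back-to-back resets do not trip over the kernel context.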
+ */ + err = wait_for_others(gt, engine); + if (err) { + pr_err("%s(%s): Failed to idle other inactive engines after device reset\n", + __func__, engine->name); + i915_request_put(rq); + i915_request_put(prev); + + GEM_TRACE_DUMP(); + intel_gt_set_wedged(gt); + goto fini; + } + + if (!wait_until_running(&h, prev)) { + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); + + pr_err("%s(%s): Failed to start request %llx, at %x\n", + __func__, engine->name, + prev->fence.seqno, hws_seqno(&h, prev)); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + i915_request_put(rq); + i915_request_put(prev); + + intel_gt_set_wedged(gt); + + err = -EIO; + goto fini; + } + + reset_count = fake_hangcheck(gt, BIT(id)); + + if (prev->fence.error != -EIO) { + pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n", + prev->fence.error); + i915_request_put(rq); + i915_request_put(prev); + err = -EINVAL; + goto fini; + } + + if (rq->fence.error) { + pr_err("Fence error status not zero [%d] after unrelated reset\n", + rq->fence.error); + i915_request_put(rq); + i915_request_put(prev); + err = -EINVAL; + goto fini; + } + + if (i915_reset_count(global) == reset_count) { + pr_err("No GPU reset recorded!\n"); + i915_request_put(rq); + i915_request_put(prev); + err = -EINVAL; + goto fini; + } + + i915_request_put(prev); + prev = rq; + count++; + } while (time_before(jiffies, end_time)); + pr_info("%s: Completed %d resets\n", engine->name, count); + + *h.batch = MI_BATCH_BUFFER_END; + intel_gt_chipset_flush(engine->gt); + + i915_request_put(prev); + + err = igt_flush_test(gt->i915); + if (err) + break; + } + +fini: + hang_fini(&h); +unlock: + igt_global_reset_unlock(gt); + + if (intel_gt_is_wedged(gt)) + return -EIO; + + return err; +} + +static int igt_handle_error(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; + struct intel_engine_cs *engine = gt->engine[RCS0]; + struct hang h; + struct i915_request *rq; + struct i915_gpu_state *error; + int err; + + /* Check that we can issue a global GPU and engine reset */ + + if (!intel_has_reset_engine(gt)) + return 0; + + if (!engine || !intel_engine_can_store_dword(engine)) + return 0; + + err = hang_init(&h, gt); + if (err) + return err; + + rq = hang_create_request(&h, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_fini; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (!wait_until_running(&h, rq)) { + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); + + pr_err("%s: Failed to start request %llx, at %x\n", + __func__, rq->fence.seqno, hws_seqno(&h, rq)); + intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); + + intel_gt_set_wedged(gt); + + err = -EIO; + goto err_request; + } + + /* Temporarily disable error capture */ + error = xchg(&global->first_error, (void *)-1); + + intel_gt_handle_error(gt, engine->mask, 0, NULL); + + xchg(&global->first_error, error); + + if (rq->fence.error != -EIO) { + pr_err("Guilty request not identified!\n"); + err = -EINVAL; + goto err_request; + } + +err_request: + i915_request_put(rq); +err_fini: + hang_fini(&h); + return err; +} + +static int __igt_atomic_reset_engine(struct intel_engine_cs *engine, + const struct igt_atomic_section *p, + const char *mode) +{ + struct tasklet_struct * const t = &engine->execlists.tasklet; + int err; + + GEM_TRACE("i915_reset_engine(%s:%s) under %s\n", + engine->name, mode, p->name); + + tasklet_disable(t); + p->critical_section_begin(); + + err = intel_engine_reset(engine, NULL); + + 
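+ /*
+ * The engine reset above ran entirely inside the atomic section
+ * (and with the execlists tasklet disabled), so any sleeping call
+ * on the reset path should have tripped the atomic-sleep debugging
+ * rather than silently succeeding.
+ */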
p->critical_section_end(); + tasklet_enable(t); + + if (err) + pr_err("i915_reset_engine(%s:%s) failed under %s\n", + engine->name, mode, p->name); + + return err; +} + +static int igt_atomic_reset_engine(struct intel_engine_cs *engine, + const struct igt_atomic_section *p) +{ + struct i915_request *rq; + struct hang h; + int err; + + err = __igt_atomic_reset_engine(engine, p, "idle"); + if (err) + return err; + + err = hang_init(&h, engine->gt); + if (err) + return err; + + rq = hang_create_request(&h, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (wait_until_running(&h, rq)) { + err = __igt_atomic_reset_engine(engine, p, "active"); + } else { + pr_err("%s(%s): Failed to start request %llx, at %x\n", + __func__, engine->name, + rq->fence.seqno, hws_seqno(&h, rq)); + intel_gt_set_wedged(engine->gt); + err = -EIO; + } + + if (err == 0) { + struct intel_wedge_me w; + + intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */) + i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); + if (intel_gt_is_wedged(engine->gt)) + err = -EIO; + } + + i915_request_put(rq); +out: + hang_fini(&h); + return err; +} + +static int igt_reset_engines_atomic(void *arg) +{ + struct intel_gt *gt = arg; + const typeof(*igt_atomic_phases) *p; + int err = 0; + + /* Check that the engines resets are usable from atomic context */ + + if (!intel_has_reset_engine(gt)) + return 0; + + if (USES_GUC_SUBMISSION(gt->i915)) + return 0; + + igt_global_reset_lock(gt); + + /* Flush any requests before we get started and check basics */ + if (!igt_force_reset(gt)) + goto unlock; + + for (p = igt_atomic_phases; p->name; p++) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) { + err = igt_atomic_reset_engine(engine, p); + if (err) + goto out; + } + } + +out: + /* As we poke around the guts, do a full reset before continuing. 
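+ * As with the igt_force_reset() call before the loop, this flushes
+ * any state a failing atomic reset may have left behind.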
*/ + igt_force_reset(gt); +unlock: + igt_global_reset_unlock(gt); + + return err; +} + +int intel_hangcheck_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_hang_sanitycheck), + SUBTEST(igt_reset_nop), + SUBTEST(igt_reset_nop_engine), + SUBTEST(igt_reset_idle_engine), + SUBTEST(igt_reset_active_engine), + SUBTEST(igt_reset_engines), + SUBTEST(igt_reset_engines_atomic), + SUBTEST(igt_reset_queue), + SUBTEST(igt_reset_wait), + SUBTEST(igt_reset_evict_ggtt), + SUBTEST(igt_reset_evict_ppgtt), + SUBTEST(igt_reset_evict_fence), + SUBTEST(igt_handle_error), + }; + struct intel_gt *gt = &i915->gt; + intel_wakeref_t wakeref; + int err; + + if (!intel_has_gpu_reset(gt)) + return 0; + + if (intel_gt_is_wedged(gt)) + return -EIO; /* we're long past hope of a successful reset */ + + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + + err = intel_gt_live_subtests(tests, gt); + + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + + return err; +} diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c new file mode 100644 index 000000000000..fd3770e48ac7 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_llc.c @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "intel_pm.h" /* intel_gpu_freq() */ +#include "selftest_llc.h" +#include "intel_rps.h" + +static int gen6_verify_ring_freq(struct intel_llc *llc) +{ + struct drm_i915_private *i915 = llc_to_gt(llc)->i915; + struct ia_constants consts; + intel_wakeref_t wakeref; + unsigned int gpu_freq; + int err = 0; + + wakeref = intel_runtime_pm_get(llc_to_gt(llc)->uncore->rpm); + + if (!get_ia_constants(llc, &consts)) { + err = -ENODEV; + goto out_rpm; + } + + for (gpu_freq = consts.min_gpu_freq; + gpu_freq <= consts.max_gpu_freq; + gpu_freq++) { + struct intel_rps *rps = &llc_to_gt(llc)->rps; + + unsigned int ia_freq, ring_freq, found; + u32 val; + + calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq); + + val = gpu_freq; + if (sandybridge_pcode_read(i915, + GEN6_PCODE_READ_MIN_FREQ_TABLE, + &val, NULL)) { + pr_err("Failed to read freq table[%d], range [%d, %d]\n", + gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq); + err = -ENXIO; + break; + } + + found = (val >> 0) & 0xff; + if (found != ia_freq) { + pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n", + gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq, + intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)), + found, ia_freq); + err = -EINVAL; + break; + } + + found = (val >> 8) & 0xff; + if (found != ring_freq) { + pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n", + gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq, + intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? 
GEN9_FREQ_SCALER : 1)), + found, ring_freq); + err = -EINVAL; + break; + } + } + +out_rpm: + intel_runtime_pm_put(llc_to_gt(llc)->uncore->rpm, wakeref); + return err; +} + +int st_llc_verify(struct intel_llc *llc) +{ + int err = 0; + + if (HAS_LLC(llc_to_gt(llc)->i915)) + err = gen6_verify_ring_freq(llc); + + return err; +} diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.h b/drivers/gpu/drm/i915/gt/selftest_llc.h new file mode 100644 index 000000000000..873f896e72f2 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_llc.h @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef SELFTEST_LLC_H +#define SELFTEST_LLC_H + +struct intel_llc; + +int st_llc_verify(struct intel_llc *llc); + +#endif /* SELFTEST_LLC_H */ diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c new file mode 100644 index 000000000000..eb71ac2f992c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -0,0 +1,3554 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include <linux/prime_numbers.h> + +#include "gem/i915_gem_pm.h" +#include "gt/intel_engine_heartbeat.h" +#include "gt/intel_reset.h" + +#include "i915_selftest.h" +#include "selftests/i915_random.h" +#include "selftests/igt_flush_test.h" +#include "selftests/igt_live_test.h" +#include "selftests/igt_spinner.h" +#include "selftests/lib_sw_fence.h" + +#include "gem/selftests/igt_gem_utils.h" +#include "gem/selftests/mock_context.h" + +#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4) +#define NUM_GPR_DW (16 * 2) /* each GPR is 2 dwords */ + +static struct i915_vma *create_scratch(struct intel_gt *gt) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED); + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return vma; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) { + i915_gem_object_put(obj); + return ERR_PTR(err); + } + + return vma; +} + +static int live_sanitycheck(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_engines_iter it; + struct i915_gem_context *ctx; + struct intel_context *ce; + struct igt_spinner spin; + int err = -ENOMEM; + + if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915)) + return 0; + + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; + + ctx = kernel_context(gt->i915); + if (!ctx) + goto err_spin; + + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + struct i915_request *rq; + + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx; + } + + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin, rq)) { + GEM_TRACE("spinner failed to start\n"); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx; + } + + igt_spinner_end(&spin); + if (igt_flush_test(gt->i915)) { + err = -EIO; + goto err_ctx; + } + } + + err = 0; +err_ctx: + i915_gem_context_unlock_engines(ctx); + kernel_context_close(ctx); +err_spin: + igt_spinner_fini(&spin); + return err; +} + +static int live_unlite_restore(struct intel_gt *gt, int prio) +{ + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + enum intel_engine_id id; + struct igt_spinner spin; + int err = -ENOMEM; + + /* + * Check that we can correctly context switch between 2 instances + * 
on the same engine from the same parent context. + */ + + if (igt_spinner_init(&spin, gt)) + return err; + + ctx = kernel_context(gt->i915); + if (!ctx) + goto err_spin; + + err = 0; + for_each_engine(engine, gt, id) { + struct intel_context *ce[2] = {}; + struct i915_request *rq[2]; + struct igt_live_test t; + int n; + + if (prio && !intel_engine_has_preemption(engine)) + continue; + + if (!intel_engine_can_store_dword(engine)) + continue; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + break; + } + + for (n = 0; n < ARRAY_SIZE(ce); n++) { + struct intel_context *tmp; + + tmp = intel_context_create(ctx, engine); + if (IS_ERR(tmp)) { + err = PTR_ERR(tmp); + goto err_ce; + } + + err = intel_context_pin(tmp); + if (err) { + intel_context_put(tmp); + goto err_ce; + } + + /* + * Setup the pair of contexts such that if we + * lite-restore using the RING_TAIL from ce[1] it + * will execute garbage from ce[0]->ring. + */ + memset(tmp->ring->vaddr, + POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */ + tmp->ring->vma->size); + + ce[n] = tmp; + } + GEM_BUG_ON(!ce[1]->ring->size); + intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2); + __execlists_update_reg_state(ce[1], engine); + + rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); + if (IS_ERR(rq[0])) { + err = PTR_ERR(rq[0]); + goto err_ce; + } + + i915_request_get(rq[0]); + i915_request_add(rq[0]); + GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit); + + if (!igt_wait_for_spinner(&spin, rq[0])) { + i915_request_put(rq[0]); + goto err_ce; + } + + rq[1] = i915_request_create(ce[1]); + if (IS_ERR(rq[1])) { + err = PTR_ERR(rq[1]); + i915_request_put(rq[0]); + goto err_ce; + } + + if (!prio) { + /* + * Ensure we do the switch to ce[1] on completion. + * + * rq[0] is already submitted, so this should reduce + * to a no-op (a wait on a request on the same engine + * uses the submit fence, not the completion fence), + * but it will install a dependency on rq[1] for rq[0] + * that will prevent the pair being reordered by + * timeslicing. 
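+ * (When a priority is passed in, we skip this wait and instead rely
+ * on engine->schedule() below to preempt the spinner with ce[1].)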
+ */ + i915_request_await_dma_fence(rq[1], &rq[0]->fence); + } + + i915_request_get(rq[1]); + i915_request_add(rq[1]); + GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix); + i915_request_put(rq[0]); + + if (prio) { + struct i915_sched_attr attr = { + .priority = prio, + }; + + /* Alternatively preempt the spinner with ce[1] */ + engine->schedule(rq[1], &attr); + } + + /* And switch back to ce[0] for good measure */ + rq[0] = i915_request_create(ce[0]); + if (IS_ERR(rq[0])) { + err = PTR_ERR(rq[0]); + i915_request_put(rq[1]); + goto err_ce; + } + + i915_request_await_dma_fence(rq[0], &rq[1]->fence); + i915_request_get(rq[0]); + i915_request_add(rq[0]); + GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix); + i915_request_put(rq[1]); + i915_request_put(rq[0]); + +err_ce: + tasklet_kill(&engine->execlists.tasklet); /* flush submission */ + igt_spinner_end(&spin); + for (n = 0; n < ARRAY_SIZE(ce); n++) { + if (IS_ERR_OR_NULL(ce[n])) + break; + + intel_context_unpin(ce[n]); + intel_context_put(ce[n]); + } + + if (igt_live_test_end(&t)) + err = -EIO; + if (err) + break; + } + + kernel_context_close(ctx); +err_spin: + igt_spinner_fini(&spin); + return err; +} + +static int live_unlite_switch(void *arg) +{ + return live_unlite_restore(arg, 0); +} + +static int live_unlite_preempt(void *arg) +{ + return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX)); +} + +static int +emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 10); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_NEQ_SDD; + *cs++ = 0; + *cs++ = i915_ggtt_offset(vma) + 4 * idx; + *cs++ = 0; + + if (idx > 0) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); + *cs++ = 0; + *cs++ = 1; + } else { + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + } + + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + + intel_ring_advance(rq, cs); + return 0; +} + +static struct i915_request * +semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx) +{ + struct i915_gem_context *ctx; + struct i915_request *rq; + int err; + + ctx = kernel_context(engine->i915); + if (!ctx) + return ERR_PTR(-ENOMEM); + + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) + goto out_ctx; + + err = 0; + if (rq->engine->emit_init_breadcrumb) + err = rq->engine->emit_init_breadcrumb(rq); + if (err == 0) + err = emit_semaphore_chain(rq, vma, idx); + if (err == 0) + i915_request_get(rq); + i915_request_add(rq); + if (err) + rq = ERR_PTR(err); + +out_ctx: + kernel_context_close(ctx); + return rq; +} + +static int +release_queue(struct intel_engine_cs *engine, + struct i915_vma *vma, + int idx, int prio) +{ + struct i915_sched_attr attr = { + .priority = prio, + }; + struct i915_request *rq; + u32 *cs; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + i915_request_add(rq); + return PTR_ERR(cs); + } + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); + *cs++ = 0; + *cs++ = 1; + + intel_ring_advance(rq, cs); + + i915_request_get(rq); + i915_request_add(rq); + + local_bh_disable(); + engine->schedule(rq, &attr); + local_bh_enable(); /* kick tasklet */ + + i915_request_put(rq); + + return 0; +} + +static int +slice_semaphore_queue(struct 
intel_engine_cs *outer, + struct i915_vma *vma, + int count) +{ + struct intel_engine_cs *engine; + struct i915_request *head; + enum intel_engine_id id; + int err, i, n = 0; + + head = semaphore_queue(outer, vma, n++); + if (IS_ERR(head)) + return PTR_ERR(head); + + for_each_engine(engine, outer->gt, id) { + for (i = 0; i < count; i++) { + struct i915_request *rq; + + rq = semaphore_queue(engine, vma, n++); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + i915_request_put(rq); + } + } + + err = release_queue(outer, vma, n, INT_MAX); + if (err) + goto out; + + if (i915_request_wait(head, 0, + 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) { + pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n", + count, n); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(outer->gt); + err = -EIO; + } + +out: + i915_request_put(head); + return err; +} + +static int live_timeslice_preempt(void *arg) +{ + struct intel_gt *gt = arg; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + void *vaddr; + int err = 0; + int count; + + /* + * If a request takes too long, we would like to give other users + * a fair go on the GPU. In particular, users may create batches + * that wait upon external input, where that input may even be + * supplied by another GPU job. To avoid blocking forever, we + * need to preempt the current task and replace it with another + * ready task. + */ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return 0; + + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) + goto err_map; + + for_each_prime_number_from(count, 1, 16) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) { + if (!intel_engine_has_preemption(engine)) + continue; + + memset(vaddr, 0, PAGE_SIZE); + + err = slice_semaphore_queue(engine, vma, count); + if (err) + goto err_pin; + + if (igt_flush_test(gt->i915)) { + err = -EIO; + goto err_pin; + } + } + } + +err_pin: + i915_vma_unpin(vma); +err_map: + i915_gem_object_unpin_map(obj); +err_obj: + i915_gem_object_put(obj); + return err; +} + +static struct i915_request *nop_request(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + return rq; + + i915_request_get(rq); + i915_request_add(rq); + + return rq; +} + +static void wait_for_submit(struct intel_engine_cs *engine, + struct i915_request *rq) +{ + do { + cond_resched(); + intel_engine_flush_submission(engine); + } while (!i915_request_is_active(rq)); +} + +static long timeslice_threshold(const struct intel_engine_cs *engine) +{ + return 2 * msecs_to_jiffies_timeout(timeslice(engine)) + 1; +} + +static int live_timeslice_queue(void *arg) +{ + struct intel_gt *gt = arg; + struct drm_i915_gem_object *obj; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct i915_vma *vma; + void *vaddr; + int err = 0; + + /* + * Make sure that even if ELSP[0] and ELSP[1] are filled with + * timeslicing between them disabled, we *do* enable timeslicing + * if the queue demands it. 
(Normally, we do not submit if + * ELSP[1] is already occupied, so must rely on timeslicing to + * eject ELSP[0] in favour of the queue.) + */ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return 0; + + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) + goto err_map; + + for_each_engine(engine, gt, id) { + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), + }; + struct i915_request *rq, *nop; + + if (!intel_engine_has_preemption(engine)) + continue; + + memset(vaddr, 0, PAGE_SIZE); + + /* ELSP[0]: semaphore wait */ + rq = semaphore_queue(engine, vma, 0); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_pin; + } + engine->schedule(rq, &attr); + wait_for_submit(engine, rq); + + /* ELSP[1]: nop request */ + nop = nop_request(engine); + if (IS_ERR(nop)) { + err = PTR_ERR(nop); + i915_request_put(rq); + goto err_pin; + } + wait_for_submit(engine, nop); + i915_request_put(nop); + + GEM_BUG_ON(i915_request_completed(rq)); + GEM_BUG_ON(execlists_active(&engine->execlists) != rq); + + /* Queue: semaphore signal, matching priority as semaphore */ + err = release_queue(engine, vma, 1, effective_prio(rq)); + if (err) { + i915_request_put(rq); + goto err_pin; + } + + intel_engine_flush_submission(engine); + if (!READ_ONCE(engine->execlists.timer.expires) && + !i915_request_completed(rq)) { + struct drm_printer p = + drm_info_printer(gt->i915->drm.dev); + + GEM_TRACE_ERR("%s: Failed to enable timeslicing!\n", + engine->name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + GEM_TRACE_DUMP(); + + memset(vaddr, 0xff, PAGE_SIZE); + err = -EINVAL; + } + + /* Timeslice every jiffy, so within 2 we should signal */ + if (i915_request_wait(rq, 0, timeslice_threshold(engine)) < 0) { + struct drm_printer p = + drm_info_printer(gt->i915->drm.dev); + + pr_err("%s: Failed to timeslice into queue\n", + engine->name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + memset(vaddr, 0xff, PAGE_SIZE); + err = -EIO; + } + i915_request_put(rq); + if (err) + break; + } + +err_pin: + i915_vma_unpin(vma); +err_map: + i915_gem_object_unpin_map(obj); +err_obj: + i915_gem_object_put(obj); + return err; +} + +static int live_busywait_preempt(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct intel_engine_cs *engine; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + enum intel_engine_id id; + int err = -ENOMEM; + u32 *map; + + /* + * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can + * preempt the busywaits used to synchronise between rings. 
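+ * The busywait below is a MI_SEMAPHORE_WAIT issued in poll mode, so
+ * the command streamer keeps re-reading the scratch dword until it
+ * matches. The engine must be able to preempt that poll for the
+ * high priority MI_STORE_DWORD_IMM to ever execute and release it.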
+ */ + + ctx_hi = kernel_context(gt->i915); + if (!ctx_hi) + return -ENOMEM; + ctx_hi->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); + + ctx_lo = kernel_context(gt->i915); + if (!ctx_lo) + goto err_ctx_hi; + ctx_lo->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); + + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto err_ctx_lo; + } + + map = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(map)) { + err = PTR_ERR(map); + goto err_obj; + } + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_map; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) + goto err_map; + + for_each_engine(engine, gt, id) { + struct i915_request *lo, *hi; + struct igt_live_test t; + u32 *cs; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (!intel_engine_can_store_dword(engine)) + continue; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + goto err_vma; + } + + /* + * We create two requests. The low priority request + * busywaits on a semaphore (inside the ringbuffer where + * is should be preemptible) and the high priority requests + * uses a MI_STORE_DWORD_IMM to update the semaphore value + * allowing the first request to complete. If preemption + * fails, we hang instead. + */ + + lo = igt_request_alloc(ctx_lo, engine); + if (IS_ERR(lo)) { + err = PTR_ERR(lo); + goto err_vma; + } + + cs = intel_ring_begin(lo, 8); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + i915_request_add(lo); + goto err_vma; + } + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(vma); + *cs++ = 0; + *cs++ = 1; + + /* XXX Do we need a flush + invalidate here? 
*/ + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = i915_ggtt_offset(vma); + *cs++ = 0; + + intel_ring_advance(lo, cs); + i915_request_add(lo); + + if (wait_for(READ_ONCE(*map), 10)) { + err = -ETIMEDOUT; + goto err_vma; + } + + /* Low priority request should be busywaiting now */ + if (i915_request_wait(lo, 0, 1) != -ETIME) { + pr_err("%s: Busywaiting request did not!\n", + engine->name); + err = -EIO; + goto err_vma; + } + + hi = igt_request_alloc(ctx_hi, engine); + if (IS_ERR(hi)) { + err = PTR_ERR(hi); + goto err_vma; + } + + cs = intel_ring_begin(hi, 4); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + i915_request_add(hi); + goto err_vma; + } + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(vma); + *cs++ = 0; + *cs++ = 0; + + intel_ring_advance(hi, cs); + i915_request_add(hi); + + if (i915_request_wait(lo, 0, HZ / 5) < 0) { + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); + + pr_err("%s: Failed to preempt semaphore busywait!\n", + engine->name); + + intel_engine_dump(engine, &p, "%s\n", engine->name); + GEM_TRACE_DUMP(); + + intel_gt_set_wedged(gt); + err = -EIO; + goto err_vma; + } + GEM_BUG_ON(READ_ONCE(*map)); + + if (igt_live_test_end(&t)) { + err = -EIO; + goto err_vma; + } + } + + err = 0; +err_vma: + i915_vma_unpin(vma); +err_map: + i915_gem_object_unpin_map(obj); +err_obj: + i915_gem_object_put(obj); +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); + return err; +} + +static struct i915_request * +spinner_create_request(struct igt_spinner *spin, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + u32 arb) +{ + struct intel_context *ce; + struct i915_request *rq; + + ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); + if (IS_ERR(ce)) + return ERR_CAST(ce); + + rq = igt_spinner_create_request(spin, ce, arb); + intel_context_put(ce); + return rq; +} + +static int live_preempt(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct igt_spinner spin_hi, spin_lo; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = -ENOMEM; + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) + pr_err("Logical preemption supported, but not exposed\n"); + + if (igt_spinner_init(&spin_hi, gt)) + return -ENOMEM; + + if (igt_spinner_init(&spin_lo, gt)) + goto err_spin_hi; + + ctx_hi = kernel_context(gt->i915); + if (!ctx_hi) + goto err_spin_lo; + ctx_hi->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); + + ctx_lo = kernel_context(gt->i915); + if (!ctx_lo) + goto err_ctx_hi; + ctx_lo->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); + + for_each_engine(engine, gt, id) { + struct igt_live_test t; + struct i915_request *rq; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + goto err_ctx_lo; + } + + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin_lo, rq)) { + GEM_TRACE("lo spinner failed to start\n"); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx_lo; + } + + rq = spinner_create_request(&spin_hi, ctx_hi, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) { + igt_spinner_end(&spin_lo); + err = 
PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin_hi, rq)) { + GEM_TRACE("hi spinner failed to start\n"); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx_lo; + } + + igt_spinner_end(&spin_hi); + igt_spinner_end(&spin_lo); + + if (igt_live_test_end(&t)) { + err = -EIO; + goto err_ctx_lo; + } + } + + err = 0; +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); +err_spin_lo: + igt_spinner_fini(&spin_lo); +err_spin_hi: + igt_spinner_fini(&spin_hi); + return err; +} + +static int live_late_preempt(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct igt_spinner spin_hi, spin_lo; + struct intel_engine_cs *engine; + struct i915_sched_attr attr = {}; + enum intel_engine_id id; + int err = -ENOMEM; + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + if (igt_spinner_init(&spin_hi, gt)) + return -ENOMEM; + + if (igt_spinner_init(&spin_lo, gt)) + goto err_spin_hi; + + ctx_hi = kernel_context(gt->i915); + if (!ctx_hi) + goto err_spin_lo; + + ctx_lo = kernel_context(gt->i915); + if (!ctx_lo) + goto err_ctx_hi; + + /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */ + ctx_lo->sched.priority = I915_USER_PRIORITY(1); + + for_each_engine(engine, gt, id) { + struct igt_live_test t; + struct i915_request *rq; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + goto err_ctx_lo; + } + + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin_lo, rq)) { + pr_err("First context failed to start\n"); + goto err_wedged; + } + + rq = spinner_create_request(&spin_hi, ctx_hi, engine, + MI_NOOP); + if (IS_ERR(rq)) { + igt_spinner_end(&spin_lo); + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (igt_wait_for_spinner(&spin_hi, rq)) { + pr_err("Second context overtook first?\n"); + goto err_wedged; + } + + attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); + engine->schedule(rq, &attr); + + if (!igt_wait_for_spinner(&spin_hi, rq)) { + pr_err("High priority context failed to preempt the low priority context\n"); + GEM_TRACE_DUMP(); + goto err_wedged; + } + + igt_spinner_end(&spin_hi); + igt_spinner_end(&spin_lo); + + if (igt_live_test_end(&t)) { + err = -EIO; + goto err_ctx_lo; + } + } + + err = 0; +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); +err_spin_lo: + igt_spinner_fini(&spin_lo); +err_spin_hi: + igt_spinner_fini(&spin_hi); + return err; + +err_wedged: + igt_spinner_end(&spin_hi); + igt_spinner_end(&spin_lo); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx_lo; +} + +struct preempt_client { + struct igt_spinner spin; + struct i915_gem_context *ctx; +}; + +static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c) +{ + c->ctx = kernel_context(gt->i915); + if (!c->ctx) + return -ENOMEM; + + if (igt_spinner_init(&c->spin, gt)) + goto err_ctx; + + return 0; + +err_ctx: + kernel_context_close(c->ctx); + return -ENOMEM; +} + +static void preempt_client_fini(struct preempt_client *c) +{ + igt_spinner_fini(&c->spin); + kernel_context_close(c->ctx); +} + +static int live_nopreempt(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct preempt_client a, b; + enum intel_engine_id id; + int 
err = -ENOMEM; + + /* + * Verify that we can disable preemption for an individual request + * that may be being observed and not want to be interrupted. + */ + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + if (preempt_client_init(gt, &a)) + return -ENOMEM; + if (preempt_client_init(gt, &b)) + goto err_client_a; + b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); + + for_each_engine(engine, gt, id) { + struct i915_request *rq_a, *rq_b; + + if (!intel_engine_has_preemption(engine)) + continue; + + engine->execlists.preempt_hang.count = 0; + + rq_a = spinner_create_request(&a.spin, + a.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq_a)) { + err = PTR_ERR(rq_a); + goto err_client_b; + } + + /* Low priority client, but unpreemptable! */ + rq_a->flags |= I915_REQUEST_NOPREEMPT; + + i915_request_add(rq_a); + if (!igt_wait_for_spinner(&a.spin, rq_a)) { + pr_err("First client failed to start\n"); + goto err_wedged; + } + + rq_b = spinner_create_request(&b.spin, + b.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq_b)) { + err = PTR_ERR(rq_b); + goto err_client_b; + } + + i915_request_add(rq_b); + + /* B is much more important than A! (But A is unpreemptable.) */ + GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a)); + + /* Wait long enough for preemption and timeslicing */ + if (igt_wait_for_spinner(&b.spin, rq_b)) { + pr_err("Second client started too early!\n"); + goto err_wedged; + } + + igt_spinner_end(&a.spin); + + if (!igt_wait_for_spinner(&b.spin, rq_b)) { + pr_err("Second client failed to start\n"); + goto err_wedged; + } + + igt_spinner_end(&b.spin); + + if (engine->execlists.preempt_hang.count) { + pr_err("Preemption recorded x%d; should have been suppressed!\n", + engine->execlists.preempt_hang.count); + err = -EINVAL; + goto err_wedged; + } + + if (igt_flush_test(gt->i915)) + goto err_wedged; + } + + err = 0; +err_client_b: + preempt_client_fini(&b); +err_client_a: + preempt_client_fini(&a); + return err; + +err_wedged: + igt_spinner_end(&b.spin); + igt_spinner_end(&a.spin); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_client_b; +} + +struct live_preempt_cancel { + struct intel_engine_cs *engine; + struct preempt_client a, b; +}; + +static int __cancel_active0(struct live_preempt_cancel *arg) +{ + struct i915_request *rq; + struct igt_live_test t; + int err; + + /* Preempt cancel of ELSP0 */ + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + if (igt_live_test_begin(&t, arg->engine->i915, + __func__, arg->engine->name)) + return -EIO; + + clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags); + rq = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_get(rq); + i915_request_add(rq); + if (!igt_wait_for_spinner(&arg->a.spin, rq)) { + err = -EIO; + goto out; + } + + i915_gem_context_set_banned(arg->a.ctx); + err = intel_engine_pulse(arg->engine); + if (err) + goto out; + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = -EIO; + goto out; + } + + if (rq->fence.error != -EIO) { + pr_err("Cancelled inflight0 request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq); + if (igt_live_test_end(&t)) + err = -EIO; + return err; +} + +static int __cancel_active1(struct live_preempt_cancel *arg) +{ + struct i915_request *rq[2] = {}; + struct igt_live_test t; + int err; + + /* Preempt cancel of ELSP1 */ + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + if (igt_live_test_begin(&t, arg->engine->i915, + __func__, arg->engine->name)) + return -EIO; + + 
clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags); + rq[0] = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_NOOP); /* no preemption */ + if (IS_ERR(rq[0])) + return PTR_ERR(rq[0]); + + i915_request_get(rq[0]); + i915_request_add(rq[0]); + if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { + err = -EIO; + goto out; + } + + clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags); + rq[1] = spinner_create_request(&arg->b.spin, + arg->b.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq[1])) { + err = PTR_ERR(rq[1]); + goto out; + } + + i915_request_get(rq[1]); + err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); + i915_request_add(rq[1]); + if (err) + goto out; + + i915_gem_context_set_banned(arg->b.ctx); + err = intel_engine_pulse(arg->engine); + if (err) + goto out; + + igt_spinner_end(&arg->a.spin); + if (i915_request_wait(rq[1], 0, HZ / 5) < 0) { + err = -EIO; + goto out; + } + + if (rq[0]->fence.error != 0) { + pr_err("Normal inflight0 request did not complete\n"); + err = -EINVAL; + goto out; + } + + if (rq[1]->fence.error != -EIO) { + pr_err("Cancelled inflight1 request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq[1]); + i915_request_put(rq[0]); + if (igt_live_test_end(&t)) + err = -EIO; + return err; +} + +static int __cancel_queued(struct live_preempt_cancel *arg) +{ + struct i915_request *rq[3] = {}; + struct igt_live_test t; + int err; + + /* Full ELSP and one in the wings */ + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + if (igt_live_test_begin(&t, arg->engine->i915, + __func__, arg->engine->name)) + return -EIO; + + clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags); + rq[0] = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq[0])) + return PTR_ERR(rq[0]); + + i915_request_get(rq[0]); + i915_request_add(rq[0]); + if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { + err = -EIO; + goto out; + } + + clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags); + rq[1] = igt_request_alloc(arg->b.ctx, arg->engine); + if (IS_ERR(rq[1])) { + err = PTR_ERR(rq[1]); + goto out; + } + + i915_request_get(rq[1]); + err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); + i915_request_add(rq[1]); + if (err) + goto out; + + rq[2] = spinner_create_request(&arg->b.spin, + arg->a.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq[2])) { + err = PTR_ERR(rq[2]); + goto out; + } + + i915_request_get(rq[2]); + err = i915_request_await_dma_fence(rq[2], &rq[1]->fence); + i915_request_add(rq[2]); + if (err) + goto out; + + i915_gem_context_set_banned(arg->a.ctx); + err = intel_engine_pulse(arg->engine); + if (err) + goto out; + + if (i915_request_wait(rq[2], 0, HZ / 5) < 0) { + err = -EIO; + goto out; + } + + if (rq[0]->fence.error != -EIO) { + pr_err("Cancelled inflight0 request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + + if (rq[1]->fence.error != 0) { + pr_err("Normal inflight1 request did not complete\n"); + err = -EINVAL; + goto out; + } + + if (rq[2]->fence.error != -EIO) { + pr_err("Cancelled queued request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq[2]); + i915_request_put(rq[1]); + i915_request_put(rq[0]); + if (igt_live_test_end(&t)) + err = -EIO; + return err; +} + +static int __cancel_hostile(struct live_preempt_cancel *arg) +{ + struct i915_request *rq; + int err; + + /* Preempt cancel non-preemptible spinner in ELSP0 */ + if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + return 0; + + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + 
clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags); + rq = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_NOOP); /* preemption disabled */ + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_get(rq); + i915_request_add(rq); + if (!igt_wait_for_spinner(&arg->a.spin, rq)) { + err = -EIO; + goto out; + } + + i915_gem_context_set_banned(arg->a.ctx); + err = intel_engine_pulse(arg->engine); /* force reset */ + if (err) + goto out; + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = -EIO; + goto out; + } + + if (rq->fence.error != -EIO) { + pr_err("Cancelled inflight0 request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq); + if (igt_flush_test(arg->engine->i915)) + err = -EIO; + return err; +} + +static int live_preempt_cancel(void *arg) +{ + struct intel_gt *gt = arg; + struct live_preempt_cancel data; + enum intel_engine_id id; + int err = -ENOMEM; + + /* + * To cancel an inflight context, we need to first remove it from the + * GPU. That sounds like preemption! Plus a little bit of bookkeeping. + */ + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + if (preempt_client_init(gt, &data.a)) + return -ENOMEM; + if (preempt_client_init(gt, &data.b)) + goto err_client_a; + + for_each_engine(data.engine, gt, id) { + if (!intel_engine_has_preemption(data.engine)) + continue; + + err = __cancel_active0(&data); + if (err) + goto err_wedged; + + err = __cancel_active1(&data); + if (err) + goto err_wedged; + + err = __cancel_queued(&data); + if (err) + goto err_wedged; + + err = __cancel_hostile(&data); + if (err) + goto err_wedged; + } + + err = 0; +err_client_b: + preempt_client_fini(&data.b); +err_client_a: + preempt_client_fini(&data.a); + return err; + +err_wedged: + GEM_TRACE_DUMP(); + igt_spinner_end(&data.b.spin); + igt_spinner_end(&data.a.spin); + intel_gt_set_wedged(gt); + goto err_client_b; +} + +static int live_suppress_self_preempt(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX) + }; + struct preempt_client a, b; + enum intel_engine_id id; + int err = -ENOMEM; + + /* + * Verify that if a preemption request does not cause a change in + * the current execution order, the preempt-to-idle injection is + * skipped and that we do not accidentally apply it after the CS + * completion event. 
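
The property being checked here can be summarised as: only bother with a preempt-to-idle cycle when the requested schedule actually differs from what is already in ELSP. A minimal standalone model of that decision (plain C; the struct and helper are illustrative, not the driver's internals):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ctx_model { int id; };

/* Preempting only to resubmit the very same contexts would be pure overhead. */
static bool need_preempt(const struct ctx_model *elsp[2],
			 const struct ctx_model *next[2])
{
	return memcmp(elsp, next, 2 * sizeof(*elsp)) != 0;
}

int main(void)
{
	struct ctx_model a = { .id = 0 }, b = { .id = 1 };
	const struct ctx_model *running[2] = { &a, &b };
	const struct ctx_model *same[2]    = { &a, &b };
	const struct ctx_model *swapped[2] = { &b, &a };

	printf("identical order -> preempt? %d\n", need_preempt(running, same));    /* 0 */
	printf("new order       -> preempt? %d\n", need_preempt(running, swapped)); /* 1 */
	return 0;
}
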
+ */ + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + if (USES_GUC_SUBMISSION(gt->i915)) + return 0; /* presume black blox */ + + if (intel_vgpu_active(gt->i915)) + return 0; /* GVT forces single port & request submission */ + + if (preempt_client_init(gt, &a)) + return -ENOMEM; + if (preempt_client_init(gt, &b)) + goto err_client_a; + + for_each_engine(engine, gt, id) { + struct i915_request *rq_a, *rq_b; + int depth; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (igt_flush_test(gt->i915)) + goto err_wedged; + + intel_engine_pm_get(engine); + engine->execlists.preempt_hang.count = 0; + + rq_a = spinner_create_request(&a.spin, + a.ctx, engine, + MI_NOOP); + if (IS_ERR(rq_a)) { + err = PTR_ERR(rq_a); + intel_engine_pm_put(engine); + goto err_client_b; + } + + i915_request_add(rq_a); + if (!igt_wait_for_spinner(&a.spin, rq_a)) { + pr_err("First client failed to start\n"); + intel_engine_pm_put(engine); + goto err_wedged; + } + + /* Keep postponing the timer to avoid premature slicing */ + mod_timer(&engine->execlists.timer, jiffies + HZ); + for (depth = 0; depth < 8; depth++) { + rq_b = spinner_create_request(&b.spin, + b.ctx, engine, + MI_NOOP); + if (IS_ERR(rq_b)) { + err = PTR_ERR(rq_b); + intel_engine_pm_put(engine); + goto err_client_b; + } + i915_request_add(rq_b); + + GEM_BUG_ON(i915_request_completed(rq_a)); + engine->schedule(rq_a, &attr); + igt_spinner_end(&a.spin); + + if (!igt_wait_for_spinner(&b.spin, rq_b)) { + pr_err("Second client failed to start\n"); + intel_engine_pm_put(engine); + goto err_wedged; + } + + swap(a, b); + rq_a = rq_b; + } + igt_spinner_end(&a.spin); + + if (engine->execlists.preempt_hang.count) { + pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n", + engine->name, + engine->execlists.preempt_hang.count, + depth); + intel_engine_pm_put(engine); + err = -EINVAL; + goto err_client_b; + } + + intel_engine_pm_put(engine); + if (igt_flush_test(gt->i915)) + goto err_wedged; + } + + err = 0; +err_client_b: + preempt_client_fini(&b); +err_client_a: + preempt_client_fini(&a); + return err; + +err_wedged: + igt_spinner_end(&b.spin); + igt_spinner_end(&a.spin); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_client_b; +} + +static int __i915_sw_fence_call +dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) +{ + return NOTIFY_DONE; +} + +static struct i915_request *dummy_request(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = kzalloc(sizeof(*rq), GFP_KERNEL); + if (!rq) + return NULL; + + rq->engine = engine; + + spin_lock_init(&rq->lock); + INIT_LIST_HEAD(&rq->fence.cb_list); + rq->fence.lock = &rq->lock; + rq->fence.ops = &i915_fence_ops; + + i915_sched_node_init(&rq->sched); + + /* mark this request as permanently incomplete */ + rq->fence.seqno = 1; + BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */ + rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1; + GEM_BUG_ON(i915_request_completed(rq)); + + i915_sw_fence_init(&rq->submit, dummy_notify); + set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); + + spin_lock_init(&rq->lock); + rq->fence.lock = &rq->lock; + INIT_LIST_HEAD(&rq->fence.cb_list); + + return rq; +} + +static void dummy_request_free(struct i915_request *dummy) +{ + /* We have to fake the CS interrupt to kick the next request */ + i915_sw_fence_commit(&dummy->submit); + + i915_request_mark_complete(dummy); + dma_fence_signal(&dummy->fence); + + i915_sched_node_fini(&dummy->sched); + i915_sw_fence_fini(&dummy->submit); + + 
dma_fence_free(&dummy->fence); +} + +static int live_suppress_wait_preempt(void *arg) +{ + struct intel_gt *gt = arg; + struct preempt_client client[4]; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = -ENOMEM; + int i; + + /* + * Waiters are given a little priority nudge, but not enough + * to actually cause any preemption. Double check that we do + * not needlessly generate preempt-to-idle cycles. + */ + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + if (preempt_client_init(gt, &client[0])) /* ELSP[0] */ + return -ENOMEM; + if (preempt_client_init(gt, &client[1])) /* ELSP[1] */ + goto err_client_0; + if (preempt_client_init(gt, &client[2])) /* head of queue */ + goto err_client_1; + if (preempt_client_init(gt, &client[3])) /* bystander */ + goto err_client_2; + + for_each_engine(engine, gt, id) { + int depth; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (!engine->emit_init_breadcrumb) + continue; + + for (depth = 0; depth < ARRAY_SIZE(client); depth++) { + struct i915_request *rq[ARRAY_SIZE(client)]; + struct i915_request *dummy; + + engine->execlists.preempt_hang.count = 0; + + dummy = dummy_request(engine); + if (!dummy) + goto err_client_3; + + for (i = 0; i < ARRAY_SIZE(client); i++) { + rq[i] = spinner_create_request(&client[i].spin, + client[i].ctx, engine, + MI_NOOP); + if (IS_ERR(rq[i])) { + err = PTR_ERR(rq[i]); + goto err_wedged; + } + + /* Disable NEWCLIENT promotion */ + __i915_active_fence_set(&i915_request_timeline(rq[i])->last_request, + &dummy->fence); + i915_request_add(rq[i]); + } + + dummy_request_free(dummy); + + GEM_BUG_ON(i915_request_completed(rq[0])); + if (!igt_wait_for_spinner(&client[0].spin, rq[0])) { + pr_err("%s: First client failed to start\n", + engine->name); + goto err_wedged; + } + GEM_BUG_ON(!i915_request_started(rq[0])); + + if (i915_request_wait(rq[depth], + I915_WAIT_PRIORITY, + 1) != -ETIME) { + pr_err("%s: Waiter depth:%d completed!\n", + engine->name, depth); + goto err_wedged; + } + + for (i = 0; i < ARRAY_SIZE(client); i++) + igt_spinner_end(&client[i].spin); + + if (igt_flush_test(gt->i915)) + goto err_wedged; + + if (engine->execlists.preempt_hang.count) { + pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n", + engine->name, + engine->execlists.preempt_hang.count, + depth); + err = -EINVAL; + goto err_client_3; + } + } + } + + err = 0; +err_client_3: + preempt_client_fini(&client[3]); +err_client_2: + preempt_client_fini(&client[2]); +err_client_1: + preempt_client_fini(&client[1]); +err_client_0: + preempt_client_fini(&client[0]); + return err; + +err_wedged: + for (i = 0; i < ARRAY_SIZE(client); i++) + igt_spinner_end(&client[i].spin); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_client_3; +} + +static int live_chain_preempt(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct preempt_client hi, lo; + enum intel_engine_id id; + int err = -ENOMEM; + + /* + * Build a chain AB...BA between two contexts (A, B) and request + * preemption of the last request. It should then complete before + * the previously submitted spinner in B. 
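
The loop below derives how many requests the chain can hold from the footprint of one spinner request in the ring, with a fixup for wraparound. The same arithmetic in isolation, using made-up numbers (the real values come from rq->head, rq->wa_tail and rq->ring->size):

#include <stdio.h>

int main(void)
{
	int ring_bytes = 16 * 1024;		/* hypothetical rq->ring->size */
	int head = 16000, wa_tail = 256;	/* the request wrapped past the end */
	int per_request = wa_tail - head;	/* bytes consumed by one request */

	if (per_request < 0)
		per_request += ring_bytes;	/* undo the ring wraparound */

	printf("one request is %d bytes, so ~%d fit in the ring\n",
	       per_request, ring_bytes / per_request);
	return 0;
}
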
+ */ + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + if (preempt_client_init(gt, &hi)) + return -ENOMEM; + + if (preempt_client_init(gt, &lo)) + goto err_client_hi; + + for_each_engine(engine, gt, id) { + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), + }; + struct igt_live_test t; + struct i915_request *rq; + int ring_size, count, i; + + if (!intel_engine_has_preemption(engine)) + continue; + + rq = spinner_create_request(&lo.spin, + lo.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + + ring_size = rq->wa_tail - rq->head; + if (ring_size < 0) + ring_size += rq->ring->size; + ring_size = rq->ring->size / ring_size; + pr_debug("%s(%s): Using maximum of %d requests\n", + __func__, engine->name, ring_size); + + igt_spinner_end(&lo.spin); + if (i915_request_wait(rq, 0, HZ / 2) < 0) { + pr_err("Timed out waiting to flush %s\n", engine->name); + goto err_wedged; + } + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + goto err_wedged; + } + + for_each_prime_number_from(count, 1, ring_size) { + rq = spinner_create_request(&hi.spin, + hi.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + if (!igt_wait_for_spinner(&hi.spin, rq)) + goto err_wedged; + + rq = spinner_create_request(&lo.spin, + lo.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + + for (i = 0; i < count; i++) { + rq = igt_request_alloc(lo.ctx, engine); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + } + + rq = igt_request_alloc(hi.ctx, engine); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + engine->schedule(rq, &attr); + + igt_spinner_end(&hi.spin); + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(gt->i915->drm.dev); + + pr_err("Failed to preempt over chain of %d\n", + count); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + goto err_wedged; + } + igt_spinner_end(&lo.spin); + + rq = igt_request_alloc(lo.ctx, engine); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(gt->i915->drm.dev); + + pr_err("Failed to flush low priority chain of %d requests\n", + count); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + goto err_wedged; + } + } + + if (igt_live_test_end(&t)) { + err = -EIO; + goto err_wedged; + } + } + + err = 0; +err_client_lo: + preempt_client_fini(&lo); +err_client_hi: + preempt_client_fini(&hi); + return err; + +err_wedged: + igt_spinner_end(&hi.spin); + igt_spinner_end(&lo.spin); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_client_lo; +} + +static int live_preempt_hang(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct igt_spinner spin_hi, spin_lo; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = -ENOMEM; + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + if (!intel_has_reset_engine(gt)) + return 0; + + if (igt_spinner_init(&spin_hi, gt)) + return -ENOMEM; + + if (igt_spinner_init(&spin_lo, gt)) + goto err_spin_hi; + + ctx_hi = kernel_context(gt->i915); + if (!ctx_hi) + goto err_spin_lo; + ctx_hi->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); + + ctx_lo = kernel_context(gt->i915); + if (!ctx_lo) + goto err_ctx_hi; + ctx_lo->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); + + 
for_each_engine(engine, gt, id) { + struct i915_request *rq; + + if (!intel_engine_has_preemption(engine)) + continue; + + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin_lo, rq)) { + GEM_TRACE("lo spinner failed to start\n"); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx_lo; + } + + rq = spinner_create_request(&spin_hi, ctx_hi, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) { + igt_spinner_end(&spin_lo); + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + init_completion(&engine->execlists.preempt_hang.completion); + engine->execlists.preempt_hang.inject_hang = true; + + i915_request_add(rq); + + if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion, + HZ / 10)) { + pr_err("Preemption did not occur within timeout!"); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx_lo; + } + + set_bit(I915_RESET_ENGINE + id, >->reset.flags); + intel_engine_reset(engine, NULL); + clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + + engine->execlists.preempt_hang.inject_hang = false; + + if (!igt_wait_for_spinner(&spin_hi, rq)) { + GEM_TRACE("hi spinner failed to start\n"); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx_lo; + } + + igt_spinner_end(&spin_hi); + igt_spinner_end(&spin_lo); + if (igt_flush_test(gt->i915)) { + err = -EIO; + goto err_ctx_lo; + } + } + + err = 0; +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); +err_spin_lo: + igt_spinner_fini(&spin_lo); +err_spin_hi: + igt_spinner_fini(&spin_hi); + return err; +} + +static int live_preempt_timeout(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct igt_spinner spin_lo; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = -ENOMEM; + + /* + * Check that we force preemption to occur by cancelling the previous + * context if it refuses to yield the GPU. 
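
In other words, a pending preemption that is not honoured within engine->props.preempt_timeout_ms (set to 1 ms below) escalates to an engine reset that cancels the hog. A rough standalone model of that policy (plain C; the struct is illustrative, not the driver's state machine):

#include <stdbool.h>
#include <stdio.h>

struct engine_model {
	unsigned int preempt_timeout_ms;	/* 0 means never force a reset */
	unsigned int preempt_pending_ms;	/* how long preemption has waited */
};

static bool force_reset(const struct engine_model *e)
{
	return e->preempt_timeout_ms &&
	       e->preempt_pending_ms > e->preempt_timeout_ms;
}

int main(void)
{
	struct engine_model e = { .preempt_timeout_ms = 1 };

	printf("t=0ms: reset? %d\n", force_reset(&e));	/* 0: still waiting */
	e.preempt_pending_ms = 5;			/* spinner never yields */
	printf("t=5ms: reset? %d\n", force_reset(&e));	/* 1: cancel the hog */
	return 0;
}
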
+ */ + if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + return 0; + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + if (!intel_has_reset_engine(gt)) + return 0; + + if (igt_spinner_init(&spin_lo, gt)) + return -ENOMEM; + + ctx_hi = kernel_context(gt->i915); + if (!ctx_hi) + goto err_spin_lo; + ctx_hi->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); + + ctx_lo = kernel_context(gt->i915); + if (!ctx_lo) + goto err_ctx_hi; + ctx_lo->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); + + for_each_engine(engine, gt, id) { + unsigned long saved_timeout; + struct i915_request *rq; + + if (!intel_engine_has_preemption(engine)) + continue; + + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_NOOP); /* preemption disabled */ + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin_lo, rq)) { + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx_lo; + } + + rq = igt_request_alloc(ctx_hi, engine); + if (IS_ERR(rq)) { + igt_spinner_end(&spin_lo); + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + /* Flush the previous CS ack before changing timeouts */ + while (READ_ONCE(engine->execlists.pending[0])) + cpu_relax(); + + saved_timeout = engine->props.preempt_timeout_ms; + engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */ + + i915_request_get(rq); + i915_request_add(rq); + + intel_engine_flush_submission(engine); + engine->props.preempt_timeout_ms = saved_timeout; + + if (i915_request_wait(rq, 0, HZ / 10) < 0) { + intel_gt_set_wedged(gt); + i915_request_put(rq); + err = -ETIME; + goto err_ctx_lo; + } + + igt_spinner_end(&spin_lo); + i915_request_put(rq); + } + + err = 0; +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); +err_spin_lo: + igt_spinner_fini(&spin_lo); + return err; +} + +static int random_range(struct rnd_state *rnd, int min, int max) +{ + return i915_prandom_u32_max_state(max - min, rnd) + min; +} + +static int random_priority(struct rnd_state *rnd) +{ + return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX); +} + +struct preempt_smoke { + struct intel_gt *gt; + struct i915_gem_context **contexts; + struct intel_engine_cs *engine; + struct drm_i915_gem_object *batch; + unsigned int ncontext; + struct rnd_state prng; + unsigned long count; +}; + +static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke) +{ + return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext, + &smoke->prng)]; +} + +static int smoke_submit(struct preempt_smoke *smoke, + struct i915_gem_context *ctx, int prio, + struct drm_i915_gem_object *batch) +{ + struct i915_request *rq; + struct i915_vma *vma = NULL; + int err = 0; + + if (batch) { + struct i915_address_space *vm; + + vm = i915_gem_context_get_vm_rcu(ctx); + vma = i915_vma_instance(batch, vm, NULL); + i915_vm_put(vm); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + return err; + } + + ctx->sched.priority = prio; + + rq = igt_request_alloc(ctx, smoke->engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto unpin; + } + + if (vma) { + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, false); + if (!err) + err = i915_vma_move_to_active(vma, rq, 0); + if (!err) + err = rq->engine->emit_bb_start(rq, + vma->node.start, + PAGE_SIZE, 0); + i915_vma_unlock(vma); + } + + i915_request_add(rq); + +unpin: + if (vma) + i915_vma_unpin(vma); + + return err; +} + +static int 
smoke_crescendo_thread(void *arg) +{ + struct preempt_smoke *smoke = arg; + IGT_TIMEOUT(end_time); + unsigned long count; + + count = 0; + do { + struct i915_gem_context *ctx = smoke_context(smoke); + int err; + + err = smoke_submit(smoke, + ctx, count % I915_PRIORITY_MAX, + smoke->batch); + if (err) + return err; + + count++; + } while (!__igt_timeout(end_time, NULL)); + + smoke->count = count; + return 0; +} + +static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) +#define BATCH BIT(0) +{ + struct task_struct *tsk[I915_NUM_ENGINES] = {}; + struct preempt_smoke arg[I915_NUM_ENGINES]; + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned long count; + int err = 0; + + for_each_engine(engine, smoke->gt, id) { + arg[id] = *smoke; + arg[id].engine = engine; + if (!(flags & BATCH)) + arg[id].batch = NULL; + arg[id].count = 0; + + tsk[id] = kthread_run(smoke_crescendo_thread, &arg, + "igt/smoke:%d", id); + if (IS_ERR(tsk[id])) { + err = PTR_ERR(tsk[id]); + break; + } + get_task_struct(tsk[id]); + } + + yield(); /* start all threads before we kthread_stop() */ + + count = 0; + for_each_engine(engine, smoke->gt, id) { + int status; + + if (IS_ERR_OR_NULL(tsk[id])) + continue; + + status = kthread_stop(tsk[id]); + if (status && !err) + err = status; + + count += arg[id].count; + + put_task_struct(tsk[id]); + } + + pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", + count, flags, + RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext); + return 0; +} + +static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) +{ + enum intel_engine_id id; + IGT_TIMEOUT(end_time); + unsigned long count; + + count = 0; + do { + for_each_engine(smoke->engine, smoke->gt, id) { + struct i915_gem_context *ctx = smoke_context(smoke); + int err; + + err = smoke_submit(smoke, + ctx, random_priority(&smoke->prng), + flags & BATCH ? 
smoke->batch : NULL); + if (err) + return err; + + count++; + } + } while (!__igt_timeout(end_time, NULL)); + + pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", + count, flags, + RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext); + return 0; +} + +static int live_preempt_smoke(void *arg) +{ + struct preempt_smoke smoke = { + .gt = arg, + .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed), + .ncontext = 1024, + }; + const unsigned int phase[] = { 0, BATCH }; + struct igt_live_test t; + int err = -ENOMEM; + u32 *cs; + int n; + + if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915)) + return 0; + + smoke.contexts = kmalloc_array(smoke.ncontext, + sizeof(*smoke.contexts), + GFP_KERNEL); + if (!smoke.contexts) + return -ENOMEM; + + smoke.batch = + i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE); + if (IS_ERR(smoke.batch)) { + err = PTR_ERR(smoke.batch); + goto err_free; + } + + cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_batch; + } + for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++) + cs[n] = MI_ARB_CHECK; + cs[n] = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(smoke.batch); + i915_gem_object_unpin_map(smoke.batch); + + if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) { + err = -EIO; + goto err_batch; + } + + for (n = 0; n < smoke.ncontext; n++) { + smoke.contexts[n] = kernel_context(smoke.gt->i915); + if (!smoke.contexts[n]) + goto err_ctx; + } + + for (n = 0; n < ARRAY_SIZE(phase); n++) { + err = smoke_crescendo(&smoke, phase[n]); + if (err) + goto err_ctx; + + err = smoke_random(&smoke, phase[n]); + if (err) + goto err_ctx; + } + +err_ctx: + if (igt_live_test_end(&t)) + err = -EIO; + + for (n = 0; n < smoke.ncontext; n++) { + if (!smoke.contexts[n]) + break; + kernel_context_close(smoke.contexts[n]); + } + +err_batch: + i915_gem_object_put(smoke.batch); +err_free: + kfree(smoke.contexts); + + return err; +} + +static int nop_virtual_engine(struct intel_gt *gt, + struct intel_engine_cs **siblings, + unsigned int nsibling, + unsigned int nctx, + unsigned int flags) +#define CHAIN BIT(0) +{ + IGT_TIMEOUT(end_time); + struct i915_request *request[16]; + struct i915_gem_context *ctx[16]; + struct intel_context *ve[16]; + unsigned long n, prime, nc; + struct igt_live_test t; + ktime_t times[2] = {}; + int err; + + GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx)); + + for (n = 0; n < nctx; n++) { + ctx[n] = kernel_context(gt->i915); + if (!ctx[n]) { + err = -ENOMEM; + nctx = n; + goto out; + } + + ve[n] = intel_execlists_create_virtual(ctx[n], + siblings, nsibling); + if (IS_ERR(ve[n])) { + kernel_context_close(ctx[n]); + err = PTR_ERR(ve[n]); + nctx = n; + goto out; + } + + err = intel_context_pin(ve[n]); + if (err) { + intel_context_put(ve[n]); + kernel_context_close(ctx[n]); + nctx = n; + goto out; + } + } + + err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name); + if (err) + goto out; + + for_each_prime_number_from(prime, 1, 8192) { + times[1] = ktime_get_raw(); + + if (flags & CHAIN) { + for (nc = 0; nc < nctx; nc++) { + for (n = 0; n < prime; n++) { + request[nc] = + i915_request_create(ve[nc]); + if (IS_ERR(request[nc])) { + err = PTR_ERR(request[nc]); + goto out; + } + + i915_request_add(request[nc]); + } + } + } else { + for (n = 0; n < prime; n++) { + for (nc = 0; nc < nctx; nc++) { + request[nc] = + i915_request_create(ve[nc]); + if (IS_ERR(request[nc])) { + err = PTR_ERR(request[nc]); + goto out; + } + + i915_request_add(request[nc]); + 
} + } + } + + for (nc = 0; nc < nctx; nc++) { + if (i915_request_wait(request[nc], 0, HZ / 10) < 0) { + pr_err("%s(%s): wait for %llx:%lld timed out\n", + __func__, ve[0]->engine->name, + request[nc]->fence.context, + request[nc]->fence.seqno); + + GEM_TRACE("%s(%s) failed at request %llx:%lld\n", + __func__, ve[0]->engine->name, + request[nc]->fence.context, + request[nc]->fence.seqno); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(gt); + break; + } + } + + times[1] = ktime_sub(ktime_get_raw(), times[1]); + if (prime == 1) + times[0] = times[1]; + + if (__igt_timeout(end_time, NULL)) + break; + } + + err = igt_live_test_end(&t); + if (err) + goto out; + + pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n", + nctx, ve[0]->engine->name, ktime_to_ns(times[0]), + prime, div64_u64(ktime_to_ns(times[1]), prime)); + +out: + if (igt_flush_test(gt->i915)) + err = -EIO; + + for (nc = 0; nc < nctx; nc++) { + intel_context_unpin(ve[nc]); + intel_context_put(ve[nc]); + kernel_context_close(ctx[nc]); + } + return err; +} + +static int live_virtual_engine(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned int class, inst; + int err; + + if (USES_GUC_SUBMISSION(gt->i915)) + return 0; + + for_each_engine(engine, gt, id) { + err = nop_virtual_engine(gt, &engine, 1, 1, 0); + if (err) { + pr_err("Failed to wrap engine %s: err=%d\n", + engine->name, err); + return err; + } + } + + for (class = 0; class <= MAX_ENGINE_CLASS; class++) { + int nsibling, n; + + nsibling = 0; + for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) { + if (!gt->engine_class[class][inst]) + continue; + + siblings[nsibling++] = gt->engine_class[class][inst]; + } + if (nsibling < 2) + continue; + + for (n = 1; n <= nsibling + 1; n++) { + err = nop_virtual_engine(gt, siblings, nsibling, + n, 0); + if (err) + return err; + } + + err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN); + if (err) + return err; + } + + return 0; +} + +static int mask_virtual_engine(struct intel_gt *gt, + struct intel_engine_cs **siblings, + unsigned int nsibling) +{ + struct i915_request *request[MAX_ENGINE_INSTANCE + 1]; + struct i915_gem_context *ctx; + struct intel_context *ve; + struct igt_live_test t; + unsigned int n; + int err; + + /* + * Check that by setting the execution mask on a request, we can + * restrict it to our desired engine within the virtual engine. 
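
The test below relies on rq->execution_mask being an engine bitmask: the virtual engine may only place the request on a sibling whose mask intersects it. A toy selection routine showing the idea (plain C; the types and names are illustrative, not the driver's):

#include <stdio.h>

#define BIT(n) (1u << (n))

struct sibling_model { const char *name; unsigned int mask; };

static const struct sibling_model *
pick_sibling(const struct sibling_model *s, int count, unsigned int execution_mask)
{
	for (int i = 0; i < count; i++)
		if (s[i].mask & execution_mask)
			return &s[i];	/* first sibling permitted by the mask */
	return NULL;
}

int main(void)
{
	const struct sibling_model vcs[] = {
		{ "vcs0", BIT(0) },
		{ "vcs1", BIT(1) },
	};
	/* Restrict the request to the second sibling, as the test does per request. */
	const struct sibling_model *s = pick_sibling(vcs, 2, vcs[1].mask);

	printf("executed on %s\n", s ? s->name : "nothing");
	return 0;
}
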
+ */ + + ctx = kernel_context(gt->i915); + if (!ctx) + return -ENOMEM; + + ve = intel_execlists_create_virtual(ctx, siblings, nsibling); + if (IS_ERR(ve)) { + err = PTR_ERR(ve); + goto out_close; + } + + err = intel_context_pin(ve); + if (err) + goto out_put; + + err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name); + if (err) + goto out_unpin; + + for (n = 0; n < nsibling; n++) { + request[n] = i915_request_create(ve); + if (IS_ERR(request[n])) { + err = PTR_ERR(request[n]); + nsibling = n; + goto out; + } + + /* Reverse order as it's more likely to be unnatural */ + request[n]->execution_mask = siblings[nsibling - n - 1]->mask; + + i915_request_get(request[n]); + i915_request_add(request[n]); + } + + for (n = 0; n < nsibling; n++) { + if (i915_request_wait(request[n], 0, HZ / 10) < 0) { + pr_err("%s(%s): wait for %llx:%lld timed out\n", + __func__, ve->engine->name, + request[n]->fence.context, + request[n]->fence.seqno); + + GEM_TRACE("%s(%s) failed at request %llx:%lld\n", + __func__, ve->engine->name, + request[n]->fence.context, + request[n]->fence.seqno); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(gt); + err = -EIO; + goto out; + } + + if (request[n]->engine != siblings[nsibling - n - 1]) { + pr_err("Executed on wrong sibling '%s', expected '%s'\n", + request[n]->engine->name, + siblings[nsibling - n - 1]->name); + err = -EINVAL; + goto out; + } + } + + err = igt_live_test_end(&t); +out: + if (igt_flush_test(gt->i915)) + err = -EIO; + + for (n = 0; n < nsibling; n++) + i915_request_put(request[n]); + +out_unpin: + intel_context_unpin(ve); +out_put: + intel_context_put(ve); +out_close: + kernel_context_close(ctx); + return err; +} + +static int live_virtual_mask(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; + unsigned int class, inst; + int err; + + if (USES_GUC_SUBMISSION(gt->i915)) + return 0; + + for (class = 0; class <= MAX_ENGINE_CLASS; class++) { + unsigned int nsibling; + + nsibling = 0; + for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) { + if (!gt->engine_class[class][inst]) + break; + + siblings[nsibling++] = gt->engine_class[class][inst]; + } + if (nsibling < 2) + continue; + + err = mask_virtual_engine(gt, siblings, nsibling); + if (err) + return err; + } + + return 0; +} + +static int preserved_virtual_engine(struct intel_gt *gt, + struct intel_engine_cs **siblings, + unsigned int nsibling) +{ + struct i915_request *last = NULL; + struct i915_gem_context *ctx; + struct intel_context *ve; + struct i915_vma *scratch; + struct igt_live_test t; + unsigned int n; + int err = 0; + u32 *cs; + + ctx = kernel_context(gt->i915); + if (!ctx) + return -ENOMEM; + + scratch = create_scratch(siblings[0]->gt); + if (IS_ERR(scratch)) { + err = PTR_ERR(scratch); + goto out_close; + } + + ve = intel_execlists_create_virtual(ctx, siblings, nsibling); + if (IS_ERR(ve)) { + err = PTR_ERR(ve); + goto out_scratch; + } + + err = intel_context_pin(ve); + if (err) + goto out_put; + + err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name); + if (err) + goto out_unpin; + + for (n = 0; n < NUM_GPR_DW; n++) { + struct intel_engine_cs *engine = siblings[n % nsibling]; + struct i915_request *rq; + + rq = i915_request_create(ve); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_end; + } + + i915_request_put(last); + last = i915_request_get(rq); + + cs = intel_ring_begin(rq, 8); + if (IS_ERR(cs)) { + i915_request_add(rq); + err = PTR_ERR(cs); + goto out_end; + } + + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; 
+ *cs++ = CS_GPR(engine, n); + *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32); + *cs++ = 0; + + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW); + *cs++ = n + 1; + + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + + /* Restrict this request to run on a particular engine */ + rq->execution_mask = engine->mask; + i915_request_add(rq); + } + + if (i915_request_wait(last, 0, HZ / 5) < 0) { + err = -ETIME; + goto out_end; + } + + cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto out_end; + } + + for (n = 0; n < NUM_GPR_DW; n++) { + if (cs[n] != n) { + pr_err("Incorrect value[%d] found for GPR[%d]\n", + cs[n], n); + err = -EINVAL; + break; + } + } + + i915_gem_object_unpin_map(scratch->obj); + +out_end: + if (igt_live_test_end(&t)) + err = -EIO; + i915_request_put(last); +out_unpin: + intel_context_unpin(ve); +out_put: + intel_context_put(ve); +out_scratch: + i915_vma_unpin_and_release(&scratch, 0); +out_close: + kernel_context_close(ctx); + return err; +} + +static int live_virtual_preserved(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; + unsigned int class, inst; + + /* + * Check that the context image retains non-privileged (user) registers + * from one engine to the next. For this we check that the CS_GPR + * are preserved. + */ + + if (USES_GUC_SUBMISSION(gt->i915)) + return 0; + + /* As we use CS_GPR we cannot run before they existed on all engines. */ + if (INTEL_GEN(gt->i915) < 9) + return 0; + + for (class = 0; class <= MAX_ENGINE_CLASS; class++) { + int nsibling, err; + + nsibling = 0; + for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) { + if (!gt->engine_class[class][inst]) + continue; + + siblings[nsibling++] = gt->engine_class[class][inst]; + } + if (nsibling < 2) + continue; + + err = preserved_virtual_engine(gt, siblings, nsibling); + if (err) + return err; + } + + return 0; +} + +static int bond_virtual_engine(struct intel_gt *gt, + unsigned int class, + struct intel_engine_cs **siblings, + unsigned int nsibling, + unsigned int flags) +#define BOND_SCHEDULE BIT(0) +{ + struct intel_engine_cs *master; + struct i915_gem_context *ctx; + struct i915_request *rq[16]; + enum intel_engine_id id; + unsigned long n; + int err; + + GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1); + + ctx = kernel_context(gt->i915); + if (!ctx) + return -ENOMEM; + + err = 0; + rq[0] = ERR_PTR(-ENOMEM); + for_each_engine(master, gt, id) { + struct i915_sw_fence fence = {}; + + if (master->class == class) + continue; + + memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq)); + + rq[0] = igt_request_alloc(ctx, master); + if (IS_ERR(rq[0])) { + err = PTR_ERR(rq[0]); + goto out; + } + i915_request_get(rq[0]); + + if (flags & BOND_SCHEDULE) { + onstack_fence_init(&fence); + err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit, + &fence, + GFP_KERNEL); + } + i915_request_add(rq[0]); + if (err < 0) + goto out; + + for (n = 0; n < nsibling; n++) { + struct intel_context *ve; + + ve = intel_execlists_create_virtual(ctx, + siblings, + nsibling); + if (IS_ERR(ve)) { + err = PTR_ERR(ve); + onstack_fence_fini(&fence); + goto out; + } + + err = intel_virtual_engine_attach_bond(ve->engine, + master, + siblings[n]); + if (err) { + intel_context_put(ve); + onstack_fence_fini(&fence); + goto out; + } + + err = intel_context_pin(ve); + intel_context_put(ve); + if (err) { + onstack_fence_fini(&fence); + goto out; + } + + rq[n + 1] = i915_request_create(ve); + 
intel_context_unpin(ve); + if (IS_ERR(rq[n + 1])) { + err = PTR_ERR(rq[n + 1]); + onstack_fence_fini(&fence); + goto out; + } + i915_request_get(rq[n + 1]); + + err = i915_request_await_execution(rq[n + 1], + &rq[0]->fence, + ve->engine->bond_execute); + i915_request_add(rq[n + 1]); + if (err < 0) { + onstack_fence_fini(&fence); + goto out; + } + } + onstack_fence_fini(&fence); + + if (i915_request_wait(rq[0], 0, HZ / 10) < 0) { + pr_err("Master request did not execute (on %s)!\n", + rq[0]->engine->name); + err = -EIO; + goto out; + } + + for (n = 0; n < nsibling; n++) { + if (i915_request_wait(rq[n + 1], 0, + MAX_SCHEDULE_TIMEOUT) < 0) { + err = -EIO; + goto out; + } + + if (rq[n + 1]->engine != siblings[n]) { + pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n", + siblings[n]->name, + rq[n + 1]->engine->name, + rq[0]->engine->name); + err = -EINVAL; + goto out; + } + } + + for (n = 0; !IS_ERR(rq[n]); n++) + i915_request_put(rq[n]); + rq[0] = ERR_PTR(-ENOMEM); + } + +out: + for (n = 0; !IS_ERR(rq[n]); n++) + i915_request_put(rq[n]); + if (igt_flush_test(gt->i915)) + err = -EIO; + + kernel_context_close(ctx); + return err; +} + +static int live_virtual_bond(void *arg) +{ + static const struct phase { + const char *name; + unsigned int flags; + } phases[] = { + { "", 0 }, + { "schedule", BOND_SCHEDULE }, + { }, + }; + struct intel_gt *gt = arg; + struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; + unsigned int class, inst; + int err; + + if (USES_GUC_SUBMISSION(gt->i915)) + return 0; + + for (class = 0; class <= MAX_ENGINE_CLASS; class++) { + const struct phase *p; + int nsibling; + + nsibling = 0; + for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) { + if (!gt->engine_class[class][inst]) + break; + + GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings)); + siblings[nsibling++] = gt->engine_class[class][inst]; + } + if (nsibling < 2) + continue; + + for (p = phases; p->name; p++) { + err = bond_virtual_engine(gt, + class, siblings, nsibling, + p->flags); + if (err) { + pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n", + __func__, p->name, class, nsibling, err); + return err; + } + } + } + + return 0; +} + +int intel_execlists_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_sanitycheck), + SUBTEST(live_unlite_switch), + SUBTEST(live_unlite_preempt), + SUBTEST(live_timeslice_preempt), + SUBTEST(live_timeslice_queue), + SUBTEST(live_busywait_preempt), + SUBTEST(live_preempt), + SUBTEST(live_late_preempt), + SUBTEST(live_nopreempt), + SUBTEST(live_preempt_cancel), + SUBTEST(live_suppress_self_preempt), + SUBTEST(live_suppress_wait_preempt), + SUBTEST(live_chain_preempt), + SUBTEST(live_preempt_hang), + SUBTEST(live_preempt_timeout), + SUBTEST(live_preempt_smoke), + SUBTEST(live_virtual_engine), + SUBTEST(live_virtual_mask), + SUBTEST(live_virtual_preserved), + SUBTEST(live_virtual_bond), + }; + + if (!HAS_EXECLISTS(i915)) + return 0; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return intel_gt_live_subtests(tests, &i915->gt); +} + +static void hexdump(const void *buf, size_t len) +{ + const size_t rowsize = 8 * sizeof(u32); + const void *prev = NULL; + bool skip = false; + size_t pos; + + for (pos = 0; pos < len; pos += rowsize) { + char line[128]; + + if (prev && !memcmp(prev, buf + pos, rowsize)) { + if (!skip) { + pr_info("*\n"); + skip = true; + } + continue; + } + + WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos, + rowsize, sizeof(u32), + line, sizeof(line), 
+ false) >= sizeof(line)); + pr_info("[%04zx] %s\n", pos, line); + + prev = buf + pos; + skip = false; + } +} + +static int live_lrc_layout(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 *mem; + int err; + + /* + * Check the registers offsets we use to create the initial reg state + * match the layout saved by HW. + */ + + mem = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!mem) + return -ENOMEM; + + err = 0; + for_each_engine(engine, gt, id) { + u32 *hw, *lrc; + int dw; + + if (!engine->default_state) + continue; + + hw = i915_gem_object_pin_map(engine->default_state, + I915_MAP_WB); + if (IS_ERR(hw)) { + err = PTR_ERR(hw); + break; + } + hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + + lrc = memset(mem, 0, PAGE_SIZE); + execlists_init_reg_state(lrc, + engine->kernel_context, + engine, + engine->kernel_context->ring, + true); + + dw = 0; + do { + u32 lri = hw[dw]; + + if (lri == 0) { + dw++; + continue; + } + + if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { + pr_err("%s: Expected LRI command at dword %d, found %08x\n", + engine->name, dw, lri); + err = -EINVAL; + break; + } + + if (lrc[dw] != lri) { + pr_err("%s: LRI command mismatch at dword %d, expected %08x found %08x\n", + engine->name, dw, lri, lrc[dw]); + err = -EINVAL; + break; + } + + lri &= 0x7f; + lri++; + dw++; + + while (lri) { + if (hw[dw] != lrc[dw]) { + pr_err("%s: Different registers found at dword %d, expected %x, found %x\n", + engine->name, dw, hw[dw], lrc[dw]); + err = -EINVAL; + break; + } + + /* + * Skip over the actual register value as we + * expect that to differ. + */ + dw += 2; + lri -= 2; + } + } while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END); + + if (err) { + pr_info("%s: HW register image:\n", engine->name); + hexdump(hw, PAGE_SIZE); + + pr_info("%s: SW register image:\n", engine->name); + hexdump(lrc, PAGE_SIZE); + } + + i915_gem_object_unpin_map(engine->default_state); + if (err) + break; + } + + kfree(mem); + return err; +} + +static int find_offset(const u32 *lri, u32 offset) +{ + int i; + + for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) + if (lri[i] == offset) + return i; + + return -1; +} + +static int live_lrc_fixed(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* + * Check the assumed register offsets match the actual locations in + * the context image. 
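
Both live_lrc_layout above and live_lrc_fixed below walk the default context image as a sequence of MI_LOAD_REGISTER_IMM packets: a header whose low 7 bits encode the payload length minus one, followed by (register offset, value) pairs. A standalone walker over a fabricated image (plain C; the opcode encoding is simplified and the register offsets are made up):

#include <stdint.h>
#include <stdio.h>

#define MI_OPCODE(op)	((uint32_t)(op) << 23)
#define MI_LRI(nreg)	(MI_OPCODE(0x22) | (2 * (nreg) - 1))

int main(void)
{
	/* Fragment of a fake context image: one LRI loading two registers. */
	const uint32_t image[] = {
		MI_LRI(2),
		0x2030, 0xdeadbeef,	/* hypothetical reg, value */
		0x2038, 0xcafef00d,	/* hypothetical reg, value */
	};
	unsigned int dw = 1;
	unsigned int payload = (image[0] & 0x7f) + 1;	/* same decode as the selftest */

	while (payload) {
		printf("reg %#06x <- %#010x\n",
		       (unsigned int)image[dw], (unsigned int)image[dw + 1]);
		dw += 2;	/* skip the value, which is expected to differ */
		payload -= 2;
	}
	return 0;
}
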
+ */ + + for_each_engine(engine, gt, id) { + const struct { + u32 reg; + u32 offset; + const char *name; + } tbl[] = { + { + i915_mmio_reg_offset(RING_START(engine->mmio_base)), + CTX_RING_BUFFER_START - 1, + "RING_START" + }, + { + i915_mmio_reg_offset(RING_CTL(engine->mmio_base)), + CTX_RING_BUFFER_CONTROL - 1, + "RING_CTL" + }, + { + i915_mmio_reg_offset(RING_HEAD(engine->mmio_base)), + CTX_RING_HEAD - 1, + "RING_HEAD" + }, + { + i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)), + CTX_RING_TAIL - 1, + "RING_TAIL" + }, + { + i915_mmio_reg_offset(RING_MI_MODE(engine->mmio_base)), + lrc_ring_mi_mode(engine), + "RING_MI_MODE" + }, + { + engine->mmio_base + 0x110, + CTX_BB_STATE - 1, + "BB_STATE" + }, + { }, + }, *t; + u32 *hw; + + if (!engine->default_state) + continue; + + hw = i915_gem_object_pin_map(engine->default_state, + I915_MAP_WB); + if (IS_ERR(hw)) { + err = PTR_ERR(hw); + break; + } + hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + + for (t = tbl; t->name; t++) { + int dw = find_offset(hw, t->reg); + + if (dw != t->offset) { + pr_err("%s: Offset for %s [0x%x] mismatch, found %x, expected %x\n", + engine->name, + t->name, + t->reg, + dw, + t->offset); + err = -EINVAL; + } + } + + i915_gem_object_unpin_map(engine->default_state); + } + + return err; +} + +static int __live_lrc_state(struct i915_gem_context *fixme, + struct intel_engine_cs *engine, + struct i915_vma *scratch) +{ + struct intel_context *ce; + struct i915_request *rq; + enum { + RING_START_IDX = 0, + RING_TAIL_IDX, + MAX_IDX + }; + u32 expected[MAX_IDX]; + u32 *cs; + int err; + int n; + + ce = intel_context_create(fixme, engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + err = intel_context_pin(ce); + if (err) + goto err_put; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_unpin; + } + + cs = intel_ring_begin(rq, 4 * MAX_IDX); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + i915_request_add(rq); + goto err_unpin; + } + + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base)); + *cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32); + *cs++ = 0; + + expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma); + + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)); + *cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32); + *cs++ = 0; + + i915_request_get(rq); + i915_request_add(rq); + + intel_engine_flush_submission(engine); + expected[RING_TAIL_IDX] = ce->ring->tail; + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = -ETIME; + goto err_rq; + } + + cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_rq; + } + + for (n = 0; n < MAX_IDX; n++) { + if (cs[n] != expected[n]) { + pr_err("%s: Stored register[%d] value[0x%x] did not match expected[0x%x]\n", + engine->name, n, cs[n], expected[n]); + err = -EINVAL; + break; + } + } + + i915_gem_object_unpin_map(scratch->obj); + +err_rq: + i915_request_put(rq); +err_unpin: + intel_context_unpin(ce); +err_put: + intel_context_put(ce); + return err; +} + +static int live_lrc_state(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *fixme; + struct i915_vma *scratch; + enum intel_engine_id id; + int err = 0; + + /* + * Check the live register state matches what we expect for this + * intel_context. 
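
The intel_ring_begin(rq, 4 * MAX_IDX) below reserves four dwords per sampled register because the GEN8+ store-register-to-memory command emitted here is header, mmio offset, destination address low, destination address high. A sketch of an encoder with that shape (plain C; the header value is a placeholder, the real MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT encoding lives in the i915 headers):

#include <stdint.h>
#include <stdio.h>

/* Placeholder only; not the real MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT value. */
#define SRM_HEADER_PLACEHOLDER	0x1u

static uint32_t *emit_srm(uint32_t *cs, uint32_t reg, uint64_t ggtt_addr)
{
	*cs++ = SRM_HEADER_PLACEHOLDER;
	*cs++ = reg;				/* mmio offset to sample */
	*cs++ = (uint32_t)ggtt_addr;		/* destination, low 32 bits */
	*cs++ = (uint32_t)(ggtt_addr >> 32);	/* destination, high 32 bits */
	return cs;
}

int main(void)
{
	uint32_t batch[8], *cs = batch;

	/* 0x2030/0x2034 are hypothetical register offsets for this sketch. */
	cs = emit_srm(cs, 0x2030, 0x1000);
	cs = emit_srm(cs, 0x2034, 0x1004);

	printf("emitted %d dwords for 2 registers\n", (int)(cs - batch));
	return 0;
}
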
+ */ + + fixme = kernel_context(gt->i915); + if (!fixme) + return -ENOMEM; + + scratch = create_scratch(gt); + if (IS_ERR(scratch)) { + err = PTR_ERR(scratch); + goto out_close; + } + + for_each_engine(engine, gt, id) { + err = __live_lrc_state(fixme, engine, scratch); + if (err) + break; + } + + if (igt_flush_test(gt->i915)) + err = -EIO; + + i915_vma_unpin_and_release(&scratch, 0); +out_close: + kernel_context_close(fixme); + return err; +} + +static int gpr_make_dirty(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + u32 *cs; + int n; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2); + if (IS_ERR(cs)) { + i915_request_add(rq); + return PTR_ERR(cs); + } + + *cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW); + for (n = 0; n < NUM_GPR_DW; n++) { + *cs++ = CS_GPR(engine, n); + *cs++ = STACK_MAGIC; + } + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + i915_request_add(rq); + + return 0; +} + +static int __live_gpr_clear(struct i915_gem_context *fixme, + struct intel_engine_cs *engine, + struct i915_vma *scratch) +{ + struct intel_context *ce; + struct i915_request *rq; + u32 *cs; + int err; + int n; + + if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS) + return 0; /* GPR only on rcs0 for gen8 */ + + err = gpr_make_dirty(engine); + if (err) + return err; + + ce = intel_context_create(fixme, engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_put; + } + + cs = intel_ring_begin(rq, 4 * NUM_GPR_DW); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + i915_request_add(rq); + goto err_put; + } + + for (n = 0; n < NUM_GPR_DW; n++) { + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = CS_GPR(engine, n); + *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32); + *cs++ = 0; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = -ETIME; + goto err_rq; + } + + cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_rq; + } + + for (n = 0; n < NUM_GPR_DW; n++) { + if (cs[n]) { + pr_err("%s: GPR[%d].%s was not zero, found 0x%08x!\n", + engine->name, + n / 2, n & 1 ? "udw" : "ldw", + cs[n]); + err = -EINVAL; + break; + } + } + + i915_gem_object_unpin_map(scratch->obj); + +err_rq: + i915_request_put(rq); +err_put: + intel_context_put(ce); + return err; +} + +static int live_gpr_clear(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *fixme; + struct i915_vma *scratch; + enum intel_engine_id id; + int err = 0; + + /* + * Check that GPR registers are cleared in new contexts as we need + * to avoid leaking any information from previous contexts. 
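The readback checked in __live_gpr_clear() above treats the scratch buffer as NUM_GPR_DW consecutive dwords and decodes index n as GPR[n / 2], with n & 1 picking the upper or lower half. A trivial standalone sketch of that mapping; the 16-register figure is an assumption of the sketch, while the real NUM_GPR_DW and CS_GPR() are defined earlier in this file:

#include <stdio.h>

#define NUM_GPR_DW (16 * 2)     /* assumption: 16 GPRs, two dwords each */

int main(void)
{
        unsigned int n;

        for (n = 0; n < NUM_GPR_DW; n++)
                printf("scratch[%2u] -> GPR%u.%s\n",
                       n, n / 2, n & 1 ? "udw" : "ldw");
        return 0;
}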
+ */ + + fixme = kernel_context(gt->i915); + if (!fixme) + return -ENOMEM; + + scratch = create_scratch(gt); + if (IS_ERR(scratch)) { + err = PTR_ERR(scratch); + goto out_close; + } + + for_each_engine(engine, gt, id) { + err = __live_gpr_clear(fixme, engine, scratch); + if (err) + break; + } + + if (igt_flush_test(gt->i915)) + err = -EIO; + + i915_vma_unpin_and_release(&scratch, 0); +out_close: + kernel_context_close(fixme); + return err; +} + +int intel_lrc_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_lrc_layout), + SUBTEST(live_lrc_fixed), + SUBTEST(live_lrc_state), + SUBTEST(live_gpr_clear), + }; + + if (!HAS_LOGICAL_RING_CONTEXTS(i915)) + return 0; + + return intel_gt_live_subtests(tests, &i915->gt); +} diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c new file mode 100644 index 000000000000..6ad6aca315f6 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2018 Intel Corporation + */ + +#include "i915_selftest.h" +#include "selftests/igt_reset.h" +#include "selftests/igt_atomic.h" + +static int igt_global_reset(void *arg) +{ + struct intel_gt *gt = arg; + unsigned int reset_count; + intel_wakeref_t wakeref; + int err = 0; + + /* Check that we can issue a global GPU reset */ + + igt_global_reset_lock(gt); + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + + reset_count = i915_reset_count(&gt->i915->gpu_error); + + intel_gt_reset(gt, ALL_ENGINES, NULL); + + if (i915_reset_count(&gt->i915->gpu_error) == reset_count) { + pr_err("No GPU reset recorded!\n"); + err = -EINVAL; + } + + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + igt_global_reset_unlock(gt); + + if (intel_gt_is_wedged(gt)) + err = -EIO; + + return err; +} + +static int igt_wedged_reset(void *arg) +{ + struct intel_gt *gt = arg; + intel_wakeref_t wakeref; + + /* Check that we can recover a wedged device with a GPU reset */ + + igt_global_reset_lock(gt); + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + + intel_gt_set_wedged(gt); + + GEM_BUG_ON(!intel_gt_is_wedged(gt)); + intel_gt_reset(gt, ALL_ENGINES, NULL); + + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + igt_global_reset_unlock(gt); + + return intel_gt_is_wedged(gt) ? -EIO : 0; +} + +static int igt_atomic_reset(void *arg) +{ + struct intel_gt *gt = arg; + const typeof(*igt_atomic_phases) *p; + int err = 0; + + /* Check that the resets are usable from atomic context */ + + intel_gt_pm_get(gt); + igt_global_reset_lock(gt); + + /* Flush any requests before we get started and check basics */ + if (!igt_force_reset(gt)) + goto unlock; + + for (p = igt_atomic_phases; p->name; p++) { + intel_engine_mask_t awake; + + GEM_TRACE("__intel_gt_reset under %s\n", p->name); + + awake = reset_prepare(gt); + p->critical_section_begin(); + + err = __intel_gt_reset(gt, ALL_ENGINES); + + p->critical_section_end(); + reset_finish(gt, awake); + + if (err) { + pr_err("__intel_gt_reset failed under %s\n", p->name); + break; + } + } + + /* As we poke around the guts, do a full reset before continuing.
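The phase table consumed by the loop above lives in selftests/igt_atomic.h and is not shown in this patch; judging only from the p->name and p->critical_section_*() accesses, an entry presumably looks roughly like the sketch below (all names here are invented for illustration):

#include <linux/irqflags.h>

static void example_hardirq_begin(void) { local_irq_disable(); }
static void example_hardirq_end(void) { local_irq_enable(); }

static const struct {
        const char *name;
        void (*critical_section_begin)(void);
        void (*critical_section_end)(void);
} example_phases[] = {
        { "hardirq", example_hardirq_begin, example_hardirq_end },
        { }     /* NULL name terminates the walk, as in the loop above */
};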
*/ + igt_force_reset(gt); + +unlock: + igt_global_reset_unlock(gt); + intel_gt_pm_put(gt); + + return err; +} + +static int igt_atomic_engine_reset(void *arg) +{ + struct intel_gt *gt = arg; + const typeof(*igt_atomic_phases) *p; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that the resets are usable from atomic context */ + + if (!intel_has_reset_engine(gt)) + return 0; + + if (USES_GUC_SUBMISSION(gt->i915)) + return 0; + + intel_gt_pm_get(gt); + igt_global_reset_lock(gt); + + /* Flush any requests before we get started and check basics */ + if (!igt_force_reset(gt)) + goto out_unlock; + + for_each_engine(engine, gt, id) { + tasklet_disable(&engine->execlists.tasklet); + intel_engine_pm_get(engine); + + for (p = igt_atomic_phases; p->name; p++) { + GEM_TRACE("intel_engine_reset(%s) under %s\n", + engine->name, p->name); + + p->critical_section_begin(); + err = intel_engine_reset(engine, NULL); + p->critical_section_end(); + + if (err) { + pr_err("intel_engine_reset(%s) failed under %s\n", + engine->name, p->name); + break; + } + } + + intel_engine_pm_put(engine); + tasklet_enable(&engine->execlists.tasklet); + if (err) + break; + } + + /* As we poke around the guts, do a full reset before continuing. */ + igt_force_reset(gt); + +out_unlock: + igt_global_reset_unlock(gt); + intel_gt_pm_put(gt); + + return err; +} + +int intel_reset_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_global_reset), /* attempt to recover GPU first */ + SUBTEST(igt_wedged_reset), + SUBTEST(igt_atomic_reset), + SUBTEST(igt_atomic_engine_reset), + }; + struct intel_gt *gt = &i915->gt; + + if (!intel_has_gpu_reset(gt)) + return 0; + + if (intel_gt_is_wedged(gt)) + return -EIO; /* we're long past hope of a successful reset */ + + return intel_gt_live_subtests(tests, gt); +} diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c new file mode 100644 index 000000000000..f04a59fe5d2c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -0,0 +1,838 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2017-2018 Intel Corporation + */ + +#include <linux/prime_numbers.h> + +#include "intel_engine_pm.h" +#include "intel_gt.h" +#include "intel_gt_requests.h" +#include "intel_ring.h" + +#include "../selftests/i915_random.h" +#include "../i915_selftest.h" + +#include "../selftests/igt_flush_test.h" +#include "../selftests/mock_gem_device.h" +#include "selftests/mock_timeline.h" + +static struct page *hwsp_page(struct intel_timeline *tl) +{ + struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + return sg_page(obj->mm.pages->sgl); +} + +static unsigned long hwsp_cacheline(struct intel_timeline *tl) +{ + unsigned long address = (unsigned long)page_address(hwsp_page(tl)); + + return (address + tl->hwsp_offset) / CACHELINE_BYTES; +} + +#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES) + +struct mock_hwsp_freelist { + struct intel_gt *gt; + struct radix_tree_root cachelines; + struct intel_timeline **history; + unsigned long count, max; + struct rnd_state prng; +}; + +enum { + SHUFFLE = BIT(0), +}; + +static void __mock_hwsp_record(struct mock_hwsp_freelist *state, + unsigned int idx, + struct intel_timeline *tl) +{ + tl = xchg(&state->history[idx], tl); + if (tl) { + radix_tree_delete(&state->cachelines, hwsp_cacheline(tl)); + intel_timeline_put(tl); + } +} + +static int 
__mock_hwsp_timeline(struct mock_hwsp_freelist *state, + unsigned int count, + unsigned int flags) +{ + struct intel_timeline *tl; + unsigned int idx; + + while (count--) { + unsigned long cacheline; + int err; + + tl = intel_timeline_create(state->gt, NULL); + if (IS_ERR(tl)) + return PTR_ERR(tl); + + cacheline = hwsp_cacheline(tl); + err = radix_tree_insert(&state->cachelines, cacheline, tl); + if (err) { + if (err == -EEXIST) { + pr_err("HWSP cacheline %lu already used; duplicate allocation!\n", + cacheline); + } + intel_timeline_put(tl); + return err; + } + + idx = state->count++ % state->max; + __mock_hwsp_record(state, idx, tl); + } + + if (flags & SHUFFLE) + i915_prandom_shuffle(state->history, + sizeof(*state->history), + min(state->count, state->max), + &state->prng); + + count = i915_prandom_u32_max_state(min(state->count, state->max), + &state->prng); + while (count--) { + idx = --state->count % state->max; + __mock_hwsp_record(state, idx, NULL); + } + + return 0; +} + +static int mock_hwsp_freelist(void *arg) +{ + struct mock_hwsp_freelist state; + struct drm_i915_private *i915; + const struct { + const char *name; + unsigned int flags; + } phases[] = { + { "linear", 0 }, + { "shuffled", SHUFFLE }, + { }, + }, *p; + unsigned int na; + int err = 0; + + i915 = mock_gem_device(); + if (!i915) + return -ENOMEM; + + INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL); + state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed); + + state.gt = &i915->gt; + + /* + * Create a bunch of timelines and check that their HWSP do not overlap. + * Free some, and try again. + */ + + state.max = PAGE_SIZE / sizeof(*state.history); + state.count = 0; + state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL); + if (!state.history) { + err = -ENOMEM; + goto err_put; + } + + for (p = phases; p->name; p++) { + pr_debug("%s(%s)\n", __func__, p->name); + for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) { + err = __mock_hwsp_timeline(&state, na, p->flags); + if (err) + goto out; + } + } + +out: + for (na = 0; na < state.max; na++) + __mock_hwsp_record(&state, na, NULL); + kfree(state.history); +err_put: + drm_dev_put(&i915->drm); + return err; +} + +struct __igt_sync { + const char *name; + u32 seqno; + bool expected; + bool set; +}; + +static int __igt_sync(struct intel_timeline *tl, + u64 ctx, + const struct __igt_sync *p, + const char *name) +{ + int ret; + + if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) { + pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n", + name, p->name, ctx, p->seqno, yesno(p->expected)); + return -EINVAL; + } + + if (p->set) { + ret = __intel_timeline_sync_set(tl, ctx, p->seqno); + if (ret) + return ret; + } + + return 0; +} + +static int igt_sync(void *arg) +{ + const struct __igt_sync pass[] = { + { "unset", 0, false, false }, + { "new", 0, false, true }, + { "0a", 0, true, true }, + { "1a", 1, false, true }, + { "1b", 1, true, true }, + { "0b", 0, true, false }, + { "2a", 2, false, true }, + { "4", 4, false, true }, + { "INT_MAX", INT_MAX, false, true }, + { "INT_MAX-1", INT_MAX-1, true, false }, + { "INT_MAX+1", (u32)INT_MAX+1, false, true }, + { "INT_MAX", INT_MAX, true, false }, + { "UINT_MAX", UINT_MAX, false, true }, + { "wrap", 0, false, true }, + { "unwrap", UINT_MAX, true, false }, + {}, + }, *p; + struct intel_timeline tl; + int order, offset; + int ret = -ENODEV; + + mock_timeline_init(&tl, 0); + for (p = pass; p->name; p++) { + for (order = 1; order < 64; order++) { + for (offset = -1; offset 
<= (order > 1); offset++) { + u64 ctx = BIT_ULL(order) + offset; + + ret = __igt_sync(&tl, ctx, p, "1"); + if (ret) + goto out; + } + } + } + mock_timeline_fini(&tl); + + mock_timeline_init(&tl, 0); + for (order = 1; order < 64; order++) { + for (offset = -1; offset <= (order > 1); offset++) { + u64 ctx = BIT_ULL(order) + offset; + + for (p = pass; p->name; p++) { + ret = __igt_sync(&tl, ctx, p, "2"); + if (ret) + goto out; + } + } + } + +out: + mock_timeline_fini(&tl); + return ret; +} + +static unsigned int random_engine(struct rnd_state *rnd) +{ + return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd); +} + +static int bench_sync(void *arg) +{ + struct rnd_state prng; + struct intel_timeline tl; + unsigned long end_time, count; + u64 prng32_1M; + ktime_t kt; + int order, last_order; + + mock_timeline_init(&tl, 0); + + /* Lookups from cache are very fast and so the random number generation + * and the loop itself becomes a significant factor in the per-iteration + * timings. We try to compensate the results by measuring the overhead + * of the prng and subtract it from the reported results. + */ + prandom_seed_state(&prng, i915_selftest.random_seed); + count = 0; + kt = ktime_get(); + end_time = jiffies + HZ/10; + do { + u32 x; + + /* Make sure the compiler doesn't optimise away the prng call */ + WRITE_ONCE(x, prandom_u32_state(&prng)); + + count++; + } while (!time_after(jiffies, end_time)); + kt = ktime_sub(ktime_get(), kt); + pr_debug("%s: %lu random evaluations, %lluns/prng\n", + __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); + prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count); + + /* Benchmark (only) setting random context ids */ + prandom_seed_state(&prng, i915_selftest.random_seed); + count = 0; + kt = ktime_get(); + end_time = jiffies + HZ/10; + do { + u64 id = i915_prandom_u64_state(&prng); + + __intel_timeline_sync_set(&tl, id, 0); + count++; + } while (!time_after(jiffies, end_time)); + kt = ktime_sub(ktime_get(), kt); + kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20); + pr_info("%s: %lu random insertions, %lluns/insert\n", + __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); + + /* Benchmark looking up the exact same context ids as we just set */ + prandom_seed_state(&prng, i915_selftest.random_seed); + end_time = count; + kt = ktime_get(); + while (end_time--) { + u64 id = i915_prandom_u64_state(&prng); + + if (!__intel_timeline_sync_is_later(&tl, id, 0)) { + mock_timeline_fini(&tl); + pr_err("Lookup of %llu failed\n", id); + return -EINVAL; + } + } + kt = ktime_sub(ktime_get(), kt); + kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20); + pr_info("%s: %lu random lookups, %lluns/lookup\n", + __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); + + mock_timeline_fini(&tl); + cond_resched(); + + mock_timeline_init(&tl, 0); + + /* Benchmark setting the first N (in order) contexts */ + count = 0; + kt = ktime_get(); + end_time = jiffies + HZ/10; + do { + __intel_timeline_sync_set(&tl, count++, 0); + } while (!time_after(jiffies, end_time)); + kt = ktime_sub(ktime_get(), kt); + pr_info("%s: %lu in-order insertions, %lluns/insert\n", + __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); + + /* Benchmark looking up the exact same context ids as we just set */ + end_time = count; + kt = ktime_get(); + while (end_time--) { + if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) { + pr_err("Lookup of %lu failed\n", end_time); + mock_timeline_fini(&tl); + return -EINVAL; + } + } + kt = ktime_sub(ktime_get(), kt); + 
pr_info("%s: %lu in-order lookups, %lluns/lookup\n", + __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); + + mock_timeline_fini(&tl); + cond_resched(); + + mock_timeline_init(&tl, 0); + + /* Benchmark searching for a random context id and maybe changing it */ + prandom_seed_state(&prng, i915_selftest.random_seed); + count = 0; + kt = ktime_get(); + end_time = jiffies + HZ/10; + do { + u32 id = random_engine(&prng); + u32 seqno = prandom_u32_state(&prng); + + if (!__intel_timeline_sync_is_later(&tl, id, seqno)) + __intel_timeline_sync_set(&tl, id, seqno); + + count++; + } while (!time_after(jiffies, end_time)); + kt = ktime_sub(ktime_get(), kt); + kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20); + pr_info("%s: %lu repeated insert/lookups, %lluns/op\n", + __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); + mock_timeline_fini(&tl); + cond_resched(); + + /* Benchmark searching for a known context id and changing the seqno */ + for (last_order = 1, order = 1; order < 32; + ({ int tmp = last_order; last_order = order; order += tmp; })) { + unsigned int mask = BIT(order) - 1; + + mock_timeline_init(&tl, 0); + + count = 0; + kt = ktime_get(); + end_time = jiffies + HZ/10; + do { + /* Without assuming too many details of the underlying + * implementation, try to identify its phase-changes + * (if any)! + */ + u64 id = (u64)(count & mask) << order; + + __intel_timeline_sync_is_later(&tl, id, 0); + __intel_timeline_sync_set(&tl, id, 0); + + count++; + } while (!time_after(jiffies, end_time)); + kt = ktime_sub(ktime_get(), kt); + pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n", + __func__, count, order, + (long long)div64_ul(ktime_to_ns(kt), count)); + mock_timeline_fini(&tl); + cond_resched(); + } + + return 0; +} + +int intel_timeline_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(mock_hwsp_freelist), + SUBTEST(igt_sync), + SUBTEST(bench_sync), + }; + + return i915_subtests(tests, NULL); +} + +static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + if (INTEL_GEN(rq->i915) >= 8) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = addr; + *cs++ = 0; + *cs++ = value; + } else if (INTEL_GEN(rq->i915) >= 4) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = 0; + *cs++ = addr; + *cs++ = value; + } else { + *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *cs++ = addr; + *cs++ = value; + *cs++ = MI_NOOP; + } + + intel_ring_advance(rq, cs); + + return 0; +} + +static struct i915_request * +tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value) +{ + struct i915_request *rq; + int err; + + err = intel_timeline_pin(tl); + if (err) { + rq = ERR_PTR(err); + goto out; + } + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + goto out_unpin; + + i915_request_get(rq); + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value); + i915_request_add(rq); + if (err) { + i915_request_put(rq); + rq = ERR_PTR(err); + } + +out_unpin: + intel_timeline_unpin(tl); +out: + if (IS_ERR(rq)) + pr_err("Failed to write to timeline!\n"); + return rq; +} + +static struct intel_timeline * +checked_intel_timeline_create(struct intel_gt *gt) +{ + struct intel_timeline *tl; + + tl = intel_timeline_create(gt, NULL); + if (IS_ERR(tl)) + return tl; + + if (*tl->hwsp_seqno != tl->seqno) { + pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n", + *tl->hwsp_seqno, tl->seqno); + 
intel_timeline_put(tl); + return ERR_PTR(-EINVAL); + } + + return tl; +} + +static int live_hwsp_engine(void *arg) +{ +#define NUM_TIMELINES 4096 + struct intel_gt *gt = arg; + struct intel_timeline **timelines; + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned long count, n; + int err = 0; + + /* + * Create a bunch of timelines and check we can write + * independently to each of their breadcrumb slots. + */ + + timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, + sizeof(*timelines), + GFP_KERNEL); + if (!timelines) + return -ENOMEM; + + count = 0; + for_each_engine(engine, gt, id) { + if (!intel_engine_can_store_dword(engine)) + continue; + + intel_engine_pm_get(engine); + + for (n = 0; n < NUM_TIMELINES; n++) { + struct intel_timeline *tl; + struct i915_request *rq; + + tl = checked_intel_timeline_create(gt); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + break; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + intel_timeline_put(tl); + err = PTR_ERR(rq); + break; + } + + timelines[count++] = tl; + i915_request_put(rq); + } + + intel_engine_pm_put(engine); + if (err) + break; + } + + if (igt_flush_test(gt->i915)) + err = -EIO; + + for (n = 0; n < count; n++) { + struct intel_timeline *tl = timelines[n]; + + if (!err && *tl->hwsp_seqno != n) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + n, *tl->hwsp_seqno); + err = -EINVAL; + } + intel_timeline_put(tl); + } + + kvfree(timelines); + return err; +#undef NUM_TIMELINES +} + +static int live_hwsp_alternate(void *arg) +{ +#define NUM_TIMELINES 4096 + struct intel_gt *gt = arg; + struct intel_timeline **timelines; + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned long count, n; + int err = 0; + + /* + * Create a bunch of timelines and check we can write + * independently to each of their breadcrumb slots with adjacent + * engines. + */ + + timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, + sizeof(*timelines), + GFP_KERNEL); + if (!timelines) + return -ENOMEM; + + count = 0; + for (n = 0; n < NUM_TIMELINES; n++) { + for_each_engine(engine, gt, id) { + struct intel_timeline *tl; + struct i915_request *rq; + + if (!intel_engine_can_store_dword(engine)) + continue; + + tl = checked_intel_timeline_create(gt); + if (IS_ERR(tl)) { + intel_engine_pm_put(engine); + err = PTR_ERR(tl); + goto out; + } + + intel_engine_pm_get(engine); + rq = tl_write(tl, engine, count); + intel_engine_pm_put(engine); + if (IS_ERR(rq)) { + intel_timeline_put(tl); + err = PTR_ERR(rq); + goto out; + } + + timelines[count++] = tl; + i915_request_put(rq); + } + } + +out: + if (igt_flush_test(gt->i915)) + err = -EIO; + + for (n = 0; n < count; n++) { + struct intel_timeline *tl = timelines[n]; + + if (!err && *tl->hwsp_seqno != n) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + n, *tl->hwsp_seqno); + err = -EINVAL; + } + intel_timeline_put(tl); + } + + kvfree(timelines); + return err; +#undef NUM_TIMELINES +} + +static int live_hwsp_wrap(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct intel_timeline *tl; + enum intel_engine_id id; + int err = 0; + + /* + * Across a seqno wrap, we need to keep the old cacheline alive for + * foreign GPU references. 
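The wrap that live_hwsp_wrap() below provokes is ordinary u32 arithmetic: seeding tl->seqno with -4u leaves the counter a few steps short of zero, so the second seqno handed out compares as numerically smaller than the first, which is exactly what the GEM_BUG_ON(seqno[1] >= seqno[0]) further down expects. A minimal standalone illustration; the step of 2 is only a stand-in for however far each request advances the timeline:

#include <stdio.h>

int main(void)
{
        unsigned int seqno = -4u;       /* 0xfffffffc, as seeded by the test */
        unsigned int first, second;

        first = seqno += 2;             /* 0xfffffffe: not yet wrapped */
        second = seqno += 2;            /* 0x00000000: wrapped past zero */

        printf("first=%08x second=%08x second<first=%d\n",
               first, second, second < first);
        return 0;
}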
+ */ + + tl = intel_timeline_create(gt, NULL); + if (IS_ERR(tl)) + return PTR_ERR(tl); + + if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) + goto out_free; + + err = intel_timeline_pin(tl); + if (err) + goto out_free; + + for_each_engine(engine, gt, id) { + const u32 *hwsp_seqno[2]; + struct i915_request *rq; + u32 seqno[2]; + + if (!intel_engine_can_store_dword(engine)) + continue; + + intel_engine_pm_get(engine); + rq = i915_request_create(engine->kernel_context); + intel_engine_pm_put(engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + tl->seqno = -4u; + + mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING); + err = intel_timeline_get_seqno(tl, rq, &seqno[0]); + mutex_unlock(&tl->mutex); + if (err) { + i915_request_add(rq); + goto out; + } + pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n", + seqno[0], tl->hwsp_offset); + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]); + if (err) { + i915_request_add(rq); + goto out; + } + hwsp_seqno[0] = tl->hwsp_seqno; + + mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING); + err = intel_timeline_get_seqno(tl, rq, &seqno[1]); + mutex_unlock(&tl->mutex); + if (err) { + i915_request_add(rq); + goto out; + } + pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n", + seqno[1], tl->hwsp_offset); + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]); + if (err) { + i915_request_add(rq); + goto out; + } + hwsp_seqno[1] = tl->hwsp_seqno; + + /* With wrap should come a new hwsp */ + GEM_BUG_ON(seqno[1] >= seqno[0]); + GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]); + + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + pr_err("Wait for timeline writes timed out!\n"); + err = -EIO; + goto out; + } + + if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) { + pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n", + *hwsp_seqno[0], *hwsp_seqno[1], + seqno[0], seqno[1]); + err = -EINVAL; + goto out; + } + + intel_gt_retire_requests(gt); /* recycle HWSP */ + } + +out: + if (igt_flush_test(gt->i915)) + err = -EIO; + + intel_timeline_unpin(tl); +out_free: + intel_timeline_put(tl); + return err; +} + +static int live_hwsp_recycle(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned long count; + int err = 0; + + /* + * Check seqno writes into one timeline at a time. We expect to + * recycle the breadcrumb slot between iterations and neither + * want to confuse ourselves or the GPU. 
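The breadcrumb slot being recycled here is the cacheline tracked by hwsp_cacheline() earlier in this file, i.e. the HWSP page is carved into CACHELINE_BYTES-sized seqno slots. A standalone sketch of that arithmetic; the 4 KiB page and 64-byte cacheline are assumptions of the sketch, not values taken from this patch:

#include <stdio.h>

int main(void)
{
        const unsigned long page_size = 4096;   /* assumed PAGE_SIZE */
        const unsigned long cacheline = 64;     /* assumed CACHELINE_BYTES */
        const unsigned long hwsp_offset = 0x80; /* hypothetical slot offset */

        printf("slots per page = %lu, offset 0x%lx -> slot %lu\n",
               page_size / cacheline,
               hwsp_offset, hwsp_offset / cacheline);
        return 0;
}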
+ */ + + count = 0; + for_each_engine(engine, gt, id) { + IGT_TIMEOUT(end_time); + + if (!intel_engine_can_store_dword(engine)) + continue; + + intel_engine_pm_get(engine); + + do { + struct intel_timeline *tl; + struct i915_request *rq; + + tl = checked_intel_timeline_create(gt); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + break; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + intel_timeline_put(tl); + err = PTR_ERR(rq); + break; + } + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + pr_err("Wait for timeline writes timed out!\n"); + i915_request_put(rq); + intel_timeline_put(tl); + err = -EIO; + break; + } + + if (*tl->hwsp_seqno != count) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + count, *tl->hwsp_seqno); + err = -EINVAL; + } + + i915_request_put(rq); + intel_timeline_put(tl); + count++; + + if (err) + break; + } while (!__igt_timeout(end_time, NULL)); + + intel_engine_pm_put(engine); + if (err) + break; + } + + return err; +} + +int intel_timeline_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_hwsp_recycle), + SUBTEST(live_hwsp_engine), + SUBTEST(live_hwsp_alternate), + SUBTEST(live_hwsp_wrap), + }; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return intel_gt_live_subtests(tests, &i915->gt); +} diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c new file mode 100644 index 000000000000..abce6e4ec9c0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -0,0 +1,1289 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include "gem/i915_gem_pm.h" +#include "gt/intel_engine_user.h" +#include "gt/intel_gt.h" +#include "i915_selftest.h" +#include "intel_reset.h" + +#include "selftests/igt_flush_test.h" +#include "selftests/igt_reset.h" +#include "selftests/igt_spinner.h" +#include "selftests/mock_drm.h" + +#include "gem/selftests/igt_gem_utils.h" +#include "gem/selftests/mock_context.h" + +static const struct wo_register { + enum intel_platform platform; + u32 reg; +} wo_registers[] = { + { INTEL_GEMINILAKE, 0x731c } +}; + +struct wa_lists { + struct i915_wa_list gt_wa_list; + struct { + struct i915_wa_list wa_list; + struct i915_wa_list ctx_wa_list; + } engine[I915_NUM_ENGINES]; +}; + +static int request_add_sync(struct i915_request *rq, int err) +{ + i915_request_get(rq); + i915_request_add(rq); + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -EIO; + i915_request_put(rq); + + return err; +} + +static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin) +{ + int err = 0; + + i915_request_get(rq); + i915_request_add(rq); + if (spin && !igt_wait_for_spinner(spin, rq)) + err = -ETIMEDOUT; + i915_request_put(rq); + + return err; +} + +static void +reference_lists_init(struct intel_gt *gt, struct wa_lists *lists) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + memset(lists, 0, sizeof(*lists)); + + wa_init_start(&lists->gt_wa_list, "GT_REF", "global"); + gt_init_workarounds(gt->i915, &lists->gt_wa_list); + wa_init_finish(&lists->gt_wa_list); + + for_each_engine(engine, gt, id) { + struct i915_wa_list *wal = &lists->engine[id].wa_list; + + wa_init_start(wal, "REF", engine->name); + engine_init_workarounds(engine, wal); + wa_init_finish(wal); + + __intel_engine_init_ctx_wa(engine, + &lists->engine[id].ctx_wa_list, + "CTX_REF"); + } +} + +static void +reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists) +{ + struct 
intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) + intel_wa_list_free(&lists->engine[id].wa_list); + + intel_wa_list_free(&lists->gt_wa_list); +} + +static struct drm_i915_gem_object * +read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) +{ + const u32 base = engine->mmio_base; + struct drm_i915_gem_object *result; + struct i915_request *rq; + struct i915_vma *vma; + u32 srm, *cs; + int err; + int i; + + result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); + if (IS_ERR(result)) + return result; + + i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC); + + cs = i915_gem_object_pin_map(result, I915_MAP_WB); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_obj; + } + memset(cs, 0xc5, PAGE_SIZE); + i915_gem_object_flush_map(result); + i915_gem_object_unpin_map(result); + + vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) + goto err_obj; + + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_pin; + } + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, true); + if (err == 0) + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); + if (err) + goto err_req; + + srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; + if (INTEL_GEN(ctx->i915) >= 8) + srm++; + + cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_req; + } + + for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { + *cs++ = srm; + *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i)); + *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i; + *cs++ = 0; + } + intel_ring_advance(rq, cs); + + i915_request_add(rq); + i915_vma_unpin(vma); + + return result; + +err_req: + i915_request_add(rq); +err_pin: + i915_vma_unpin(vma); +err_obj: + i915_gem_object_put(result); + return ERR_PTR(err); +} + +static u32 +get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i) +{ + i915_reg_t reg = i < engine->whitelist.count ? + engine->whitelist.list[i].reg : + RING_NOPID(engine->mmio_base); + + return i915_mmio_reg_offset(reg); +} + +static void +print_results(const struct intel_engine_cs *engine, const u32 *results) +{ + unsigned int i; + + for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { + u32 expected = get_whitelist_reg(engine, i); + u32 actual = results[i]; + + pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n", + i, expected, actual); + } +} + +static int check_whitelist(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct drm_i915_gem_object *results; + struct intel_wedge_me wedge; + u32 *vaddr; + int err; + int i; + + results = read_nonprivs(ctx, engine); + if (IS_ERR(results)) + return PTR_ERR(results); + + err = 0; + i915_gem_object_lock(results); + intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! 
*/ + err = i915_gem_object_set_to_cpu_domain(results, false); + i915_gem_object_unlock(results); + if (intel_gt_is_wedged(engine->gt)) + err = -EIO; + if (err) + goto out_put; + + vaddr = i915_gem_object_pin_map(results, I915_MAP_WB); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto out_put; + } + + for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { + u32 expected = get_whitelist_reg(engine, i); + u32 actual = vaddr[i]; + + if (expected != actual) { + print_results(engine, vaddr); + pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n", + i, expected, actual); + + err = -EINVAL; + break; + } + } + + i915_gem_object_unpin_map(results); +out_put: + i915_gem_object_put(results); + return err; +} + +static int do_device_reset(struct intel_engine_cs *engine) +{ + intel_gt_reset(engine->gt, engine->mask, "live_workarounds"); + return 0; +} + +static int do_engine_reset(struct intel_engine_cs *engine) +{ + return intel_engine_reset(engine, "live_workarounds"); +} + +static int +switch_to_scratch_context(struct intel_engine_cs *engine, + struct igt_spinner *spin) +{ + struct i915_gem_context *ctx; + struct intel_context *ce; + struct i915_request *rq; + int err = 0; + + ctx = kernel_context(engine->i915); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + GEM_BUG_ON(i915_gem_context_is_bannable(ctx)); + + ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); + GEM_BUG_ON(IS_ERR(ce)); + + rq = igt_spinner_create_request(spin, ce, MI_NOOP); + + intel_context_put(ce); + + if (IS_ERR(rq)) { + spin = NULL; + err = PTR_ERR(rq); + goto err; + } + + err = request_add_spin(rq, spin); +err: + if (err && spin) + igt_spinner_end(spin); + + kernel_context_close(ctx); + return err; +} + +static int check_whitelist_across_reset(struct intel_engine_cs *engine, + int (*reset)(struct intel_engine_cs *), + const char *name) +{ + struct drm_i915_private *i915 = engine->i915; + struct i915_gem_context *ctx, *tmp; + struct igt_spinner spin; + intel_wakeref_t wakeref; + int err; + + pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n", + engine->whitelist.count, engine->name, name); + + ctx = kernel_context(i915); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + err = igt_spinner_init(&spin, engine->gt); + if (err) + goto out_ctx; + + err = check_whitelist(ctx, engine); + if (err) { + pr_err("Invalid whitelist *before* %s reset!\n", name); + goto out_spin; + } + + err = switch_to_scratch_context(engine, &spin); + if (err) + goto out_spin; + + with_intel_runtime_pm(engine->uncore->rpm, wakeref) + err = reset(engine); + + igt_spinner_end(&spin); + + if (err) { + pr_err("%s reset failed\n", name); + goto out_spin; + } + + err = check_whitelist(ctx, engine); + if (err) { + pr_err("Whitelist not preserved in context across %s reset!\n", + name); + goto out_spin; + } + + tmp = kernel_context(i915); + if (IS_ERR(tmp)) { + err = PTR_ERR(tmp); + goto out_spin; + } + kernel_context_close(ctx); + ctx = tmp; + + err = check_whitelist(ctx, engine); + if (err) { + pr_err("Invalid whitelist *after* %s reset in fresh context!\n", + name); + goto out_spin; + } + +out_spin: + igt_spinner_fini(&spin); +out_ctx: + kernel_context_close(ctx); + return err; +} + +static struct i915_vma *create_batch(struct i915_gem_context *ctx) +{ + struct drm_i915_gem_object *obj; + struct i915_address_space *vm; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vm = i915_gem_context_get_vm_rcu(ctx); + vma = 
i915_vma_instance(obj, vm, NULL); + i915_vm_put(vm); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto err_obj; + + return vma; + +err_obj: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static u32 reg_write(u32 old, u32 new, u32 rsvd) +{ + if (rsvd == 0x0000ffff) { + old &= ~(new >> 16); + old |= new & (new >> 16); + } else { + old &= ~rsvd; + old |= new & rsvd; + } + + return old; +} + +static bool wo_register(struct intel_engine_cs *engine, u32 reg) +{ + enum intel_platform platform = INTEL_INFO(engine->i915)->platform; + int i; + + if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) == + RING_FORCE_TO_NONPRIV_ACCESS_WR) + return true; + + for (i = 0; i < ARRAY_SIZE(wo_registers); i++) { + if (wo_registers[i].platform == platform && + wo_registers[i].reg == reg) + return true; + } + + return false; +} + +static bool ro_register(u32 reg) +{ + if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) == + RING_FORCE_TO_NONPRIV_ACCESS_RD) + return true; + + return false; +} + +static int whitelist_writable_count(struct intel_engine_cs *engine) +{ + int count = engine->whitelist.count; + int i; + + for (i = 0; i < engine->whitelist.count; i++) { + u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); + + if (ro_register(reg)) + count--; + } + + return count; +} + +static int check_dirty_whitelist(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + const u32 values[] = { + 0x00000000, + 0x01010101, + 0x10100101, + 0x03030303, + 0x30300303, + 0x05050505, + 0x50500505, + 0x0f0f0f0f, + 0xf00ff00f, + 0x10101010, + 0xf0f01010, + 0x30303030, + 0xa0a03030, + 0x50505050, + 0xc0c05050, + 0xf0f0f0f0, + 0x11111111, + 0x33333333, + 0x55555555, + 0x0000ffff, + 0x00ff00ff, + 0xff0000ff, + 0xffff00ff, + 0xffffffff, + }; + struct i915_address_space *vm; + struct i915_vma *scratch; + struct i915_vma *batch; + int err = 0, i, v; + u32 *cs, *results; + + vm = i915_gem_context_get_vm_rcu(ctx); + scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1); + i915_vm_put(vm); + if (IS_ERR(scratch)) + return PTR_ERR(scratch); + + batch = create_batch(ctx); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto out_scratch; + } + + for (i = 0; i < engine->whitelist.count; i++) { + u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); + u64 addr = scratch->node.start; + struct i915_request *rq; + u32 srm, lrm, rsvd; + u32 expect; + int idx; + bool ro_reg; + + if (wo_register(engine, reg)) + continue; + + ro_reg = ro_register(reg); + + /* Clear non priv flags */ + reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK; + + srm = MI_STORE_REGISTER_MEM; + lrm = MI_LOAD_REGISTER_MEM; + if (INTEL_GEN(ctx->i915) >= 8) + lrm++, srm++; + + pr_debug("%s: Writing garbage to %x\n", + engine->name, reg); + + cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto out_batch; + } + + /* SRM original */ + *cs++ = srm; + *cs++ = reg; + *cs++ = lower_32_bits(addr); + *cs++ = upper_32_bits(addr); + + idx = 1; + for (v = 0; v < ARRAY_SIZE(values); v++) { + /* LRI garbage */ + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = reg; + *cs++ = values[v]; + + /* SRM result */ + *cs++ = srm; + *cs++ = reg; + *cs++ = lower_32_bits(addr + sizeof(u32) * idx); + *cs++ = upper_32_bits(addr + sizeof(u32) * idx); + idx++; + } + for (v = 0; v < ARRAY_SIZE(values); v++) { + /* LRI garbage */ + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = reg; + *cs++ = ~values[v]; + + /* SRM result */ + *cs++ = srm; + *cs++ = reg; + *cs++ = 
lower_32_bits(addr + sizeof(u32) * idx); + *cs++ = upper_32_bits(addr + sizeof(u32) * idx); + idx++; + } + GEM_BUG_ON(idx * sizeof(u32) > scratch->size); + + /* LRM original -- don't leave garbage in the context! */ + *cs++ = lrm; + *cs++ = reg; + *cs++ = lower_32_bits(addr); + *cs++ = upper_32_bits(addr); + + *cs++ = MI_BATCH_BUFFER_END; + + i915_gem_object_flush_map(batch->obj); + i915_gem_object_unpin_map(batch->obj); + intel_gt_chipset_flush(engine->gt); + + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_batch; + } + + if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ + err = engine->emit_init_breadcrumb(rq); + if (err) + goto err_request; + } + + i915_vma_lock(batch); + err = i915_request_await_object(rq, batch->obj, false); + if (err == 0) + err = i915_vma_move_to_active(batch, rq, 0); + i915_vma_unlock(batch); + if (err) + goto err_request; + + err = engine->emit_bb_start(rq, + batch->node.start, PAGE_SIZE, + 0); + if (err) + goto err_request; + +err_request: + err = request_add_sync(rq, err); + if (err) { + pr_err("%s: Futzing %x timedout; cancelling test\n", + engine->name, reg); + intel_gt_set_wedged(engine->gt); + goto out_batch; + } + + results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); + if (IS_ERR(results)) { + err = PTR_ERR(results); + goto out_batch; + } + + GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff); + if (!ro_reg) { + /* detect write masking */ + rsvd = results[ARRAY_SIZE(values)]; + if (!rsvd) { + pr_err("%s: Unable to write to whitelisted register %x\n", + engine->name, reg); + err = -EINVAL; + goto out_unpin; + } + } + + expect = results[0]; + idx = 1; + for (v = 0; v < ARRAY_SIZE(values); v++) { + if (ro_reg) + expect = results[0]; + else + expect = reg_write(expect, values[v], rsvd); + + if (results[idx] != expect) + err++; + idx++; + } + for (v = 0; v < ARRAY_SIZE(values); v++) { + if (ro_reg) + expect = results[0]; + else + expect = reg_write(expect, ~values[v], rsvd); + + if (results[idx] != expect) + err++; + idx++; + } + if (err) { + pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n", + engine->name, err, reg); + + if (ro_reg) + pr_info("%s: Whitelisted read-only register: %x, original value %08x\n", + engine->name, reg, results[0]); + else + pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n", + engine->name, reg, results[0], rsvd); + + expect = results[0]; + idx = 1; + for (v = 0; v < ARRAY_SIZE(values); v++) { + u32 w = values[v]; + + if (ro_reg) + expect = results[0]; + else + expect = reg_write(expect, w, rsvd); + pr_info("Wrote %08x, read %08x, expect %08x\n", + w, results[idx], expect); + idx++; + } + for (v = 0; v < ARRAY_SIZE(values); v++) { + u32 w = ~values[v]; + + if (ro_reg) + expect = results[0]; + else + expect = reg_write(expect, w, rsvd); + pr_info("Wrote %08x, read %08x, expect %08x\n", + w, results[idx], expect); + idx++; + } + + err = -EINVAL; + } +out_unpin: + i915_gem_object_unpin_map(scratch->obj); + if (err) + break; + } + + if (igt_flush_test(ctx->i915)) + err = -EIO; +out_batch: + i915_vma_unpin_and_release(&batch, 0); +out_scratch: + i915_vma_unpin_and_release(&scratch, 0); + return err; +} + +static int live_dirty_whitelist(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + enum intel_engine_id id; + struct drm_file *file; + int err = 0; + + /* Can the user write to the whitelisted registers? 
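The expected values in check_dirty_whitelist() above are folded through reg_write(), which models two cases: a readback of 0x0000ffff after writing all-ones is taken to mean a masked register, where the upper 16 bits of a write select which of the lower bits take effect, and anything else is treated as a plain mask of writable bits. A standalone restatement with two worked examples:

#include <stdio.h>

typedef unsigned int u32;

/* same helper as reg_write() above */
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
        if (rsvd == 0x0000ffff) {       /* masked register: high half selects bits */
                old &= ~(new >> 16);
                old |= new & (new >> 16);
        } else {                        /* plain register: rsvd = writable bits */
                old &= ~rsvd;
                old |= new & rsvd;
        }
        return old;
}

int main(void)
{
        /* masked register: the write selects only bit 0, so bit 4 survives -> 0x11 */
        printf("%08x\n", reg_write(0x00000010, 0x00010001, 0x0000ffff));
        /* plain register where only the low byte is writable -> 0xdeadbe55 */
        printf("%08x\n", reg_write(0xdeadbeef, 0x00000055, 0x000000ff));
        return 0;
}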
*/ + + if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */ + return 0; + + file = mock_file(gt->i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = live_context(gt->i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_file; + } + + for_each_engine(engine, gt, id) { + if (engine->whitelist.count == 0) + continue; + + err = check_dirty_whitelist(ctx, engine); + if (err) + goto out_file; + } + +out_file: + mock_file_free(gt->i915, file); + return err; +} + +static int live_reset_whitelist(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* If we reset the gpu, we should not lose the RING_NONPRIV */ + igt_global_reset_lock(gt); + + for_each_engine(engine, gt, id) { + if (engine->whitelist.count == 0) + continue; + + if (intel_has_reset_engine(gt)) { + err = check_whitelist_across_reset(engine, + do_engine_reset, + "engine"); + if (err) + goto out; + } + + if (intel_has_gpu_reset(gt)) { + err = check_whitelist_across_reset(engine, + do_device_reset, + "device"); + if (err) + goto out; + } + } + +out: + igt_global_reset_unlock(gt); + return err; +} + +static int read_whitelisted_registers(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct i915_vma *results) +{ + struct i915_request *rq; + int i, err = 0; + u32 srm, *cs; + + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_vma_lock(results); + err = i915_request_await_object(rq, results->obj, true); + if (err == 0) + err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(results); + if (err) + goto err_req; + + srm = MI_STORE_REGISTER_MEM; + if (INTEL_GEN(ctx->i915) >= 8) + srm++; + + cs = intel_ring_begin(rq, 4 * engine->whitelist.count); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_req; + } + + for (i = 0; i < engine->whitelist.count; i++) { + u64 offset = results->node.start + sizeof(u32) * i; + u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); + + /* Clear non priv flags */ + reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK; + + *cs++ = srm; + *cs++ = reg; + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + } + intel_ring_advance(rq, cs); + +err_req: + return request_add_sync(rq, err); +} + +static int scrub_whitelisted_registers(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct i915_request *rq; + struct i915_vma *batch; + int i, err = 0; + u32 *cs; + + batch = create_batch(ctx); + if (IS_ERR(batch)) + return PTR_ERR(batch); + + cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_batch; + } + + *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine)); + for (i = 0; i < engine->whitelist.count; i++) { + u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); + + if (ro_register(reg)) + continue; + + /* Clear non priv flags */ + reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK; + + *cs++ = reg; + *cs++ = 0xffffffff; + } + *cs++ = MI_BATCH_BUFFER_END; + + i915_gem_object_flush_map(batch->obj); + intel_gt_chipset_flush(engine->gt); + + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_unpin; + } + + if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ + err = engine->emit_init_breadcrumb(rq); + if (err) + goto err_request; + } + + i915_vma_lock(batch); + err = i915_request_await_object(rq, batch->obj, false); + if (err == 0) + err = i915_vma_move_to_active(batch, rq, 0); + 
i915_vma_unlock(batch); + if (err) + goto err_request; + + /* Perform the writes from an unprivileged "user" batch */ + err = engine->emit_bb_start(rq, batch->node.start, 0, 0); + +err_request: + err = request_add_sync(rq, err); + +err_unpin: + i915_gem_object_unpin_map(batch->obj); +err_batch: + i915_vma_unpin_and_release(&batch, 0); + return err; +} + +struct regmask { + i915_reg_t reg; + unsigned long gen_mask; +}; + +static bool find_reg(struct drm_i915_private *i915, + i915_reg_t reg, + const struct regmask *tbl, + unsigned long count) +{ + u32 offset = i915_mmio_reg_offset(reg); + + while (count--) { + if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask && + i915_mmio_reg_offset(tbl->reg) == offset) + return true; + tbl++; + } + + return false; +} + +static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg) +{ + /* Alas, we must pardon some whitelists. Mistakes already made */ + static const struct regmask pardon[] = { + { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) }, + { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) }, + }; + + return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon)); +} + +static bool result_eq(struct intel_engine_cs *engine, + u32 a, u32 b, i915_reg_t reg) +{ + if (a != b && !pardon_reg(engine->i915, reg)) { + pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n", + i915_mmio_reg_offset(reg), a, b); + return false; + } + + return true; +} + +static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg) +{ + /* Some registers do not seem to behave and our writes unreadable */ + static const struct regmask wo[] = { + { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) }, + }; + + return find_reg(i915, reg, wo, ARRAY_SIZE(wo)); +} + +static bool result_neq(struct intel_engine_cs *engine, + u32 a, u32 b, i915_reg_t reg) +{ + if (a == b && !writeonly_reg(engine->i915, reg)) { + pr_err("Whitelist register 0x%4x:%08x was unwritable\n", + i915_mmio_reg_offset(reg), a); + return false; + } + + return true; +} + +static int +check_whitelisted_registers(struct intel_engine_cs *engine, + struct i915_vma *A, + struct i915_vma *B, + bool (*fn)(struct intel_engine_cs *engine, + u32 a, u32 b, + i915_reg_t reg)) +{ + u32 *a, *b; + int i, err; + + a = i915_gem_object_pin_map(A->obj, I915_MAP_WB); + if (IS_ERR(a)) + return PTR_ERR(a); + + b = i915_gem_object_pin_map(B->obj, I915_MAP_WB); + if (IS_ERR(b)) { + err = PTR_ERR(b); + goto err_a; + } + + err = 0; + for (i = 0; i < engine->whitelist.count; i++) { + const struct i915_wa *wa = &engine->whitelist.list[i]; + + if (i915_mmio_reg_offset(wa->reg) & + RING_FORCE_TO_NONPRIV_ACCESS_RD) + continue; + + if (!fn(engine, a[i], b[i], wa->reg)) + err = -EINVAL; + } + + i915_gem_object_unpin_map(B->obj); +err_a: + i915_gem_object_unpin_map(A->obj); + return err; +} + +static int live_isolated_whitelist(void *arg) +{ + struct intel_gt *gt = arg; + struct { + struct i915_gem_context *ctx; + struct i915_vma *scratch[2]; + } client[2] = {}; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int i, err = 0; + + /* + * Check that a write into a whitelist register works, but + * invisible to a second context. 
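scrub_whitelisted_registers() above packs every writable whitelist slot into a single MI_LOAD_REGISTER_IMM packet: one header sized by whitelist_writable_count(), a (register, 0xffffffff) pair per slot, then MI_BATCH_BUFFER_END. A standalone sketch of the resulting dword stream; the two register offsets are placeholders and the opcode values follow the usual gen8 MI encodings rather than being taken from this patch:

#include <stdio.h>

typedef unsigned int u32;

#define MI_LOAD_REGISTER_IMM(n) (0x22u << 23 | (2 * (n) - 1))
#define MI_BATCH_BUFFER_END     (0x0au << 23)

int main(void)
{
        static const u32 regs[] = { 0x24d0, 0x24d4 };   /* hypothetical whitelist */
        u32 cmds[2 + 2 * 2], *cs = cmds;                /* header + 2 pairs + end */
        unsigned int i;

        *cs++ = MI_LOAD_REGISTER_IMM(2);
        for (i = 0; i < 2; i++) {
                *cs++ = regs[i];
                *cs++ = 0xffffffff;     /* scrub value, as in the test */
        }
        *cs++ = MI_BATCH_BUFFER_END;

        for (i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++)
                printf("dword %u: %08x\n", i, cmds[i]);
        return 0;
}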
+ */ + + if (!intel_engines_has_context_isolation(gt->i915)) + return 0; + + for (i = 0; i < ARRAY_SIZE(client); i++) { + struct i915_address_space *vm; + struct i915_gem_context *c; + + c = kernel_context(gt->i915); + if (IS_ERR(c)) { + err = PTR_ERR(c); + goto err; + } + + vm = i915_gem_context_get_vm_rcu(c); + + client[i].scratch[0] = create_scratch(vm, 1024); + if (IS_ERR(client[i].scratch[0])) { + err = PTR_ERR(client[i].scratch[0]); + i915_vm_put(vm); + kernel_context_close(c); + goto err; + } + + client[i].scratch[1] = create_scratch(vm, 1024); + if (IS_ERR(client[i].scratch[1])) { + err = PTR_ERR(client[i].scratch[1]); + i915_vma_unpin_and_release(&client[i].scratch[0], 0); + i915_vm_put(vm); + kernel_context_close(c); + goto err; + } + + client[i].ctx = c; + i915_vm_put(vm); + } + + for_each_engine(engine, gt, id) { + if (!engine->kernel_context->vm) + continue; + + if (!whitelist_writable_count(engine)) + continue; + + /* Read default values */ + err = read_whitelisted_registers(client[0].ctx, engine, + client[0].scratch[0]); + if (err) + goto err; + + /* Try to overwrite registers (should only affect ctx0) */ + err = scrub_whitelisted_registers(client[0].ctx, engine); + if (err) + goto err; + + /* Read values from ctx1, we expect these to be defaults */ + err = read_whitelisted_registers(client[1].ctx, engine, + client[1].scratch[0]); + if (err) + goto err; + + /* Verify that both reads return the same default values */ + err = check_whitelisted_registers(engine, + client[0].scratch[0], + client[1].scratch[0], + result_eq); + if (err) + goto err; + + /* Read back the updated values in ctx0 */ + err = read_whitelisted_registers(client[0].ctx, engine, + client[0].scratch[1]); + if (err) + goto err; + + /* User should be granted privilege to overwhite regs */ + err = check_whitelisted_registers(engine, + client[0].scratch[0], + client[0].scratch[1], + result_neq); + if (err) + goto err; + } + +err: + for (i = 0; i < ARRAY_SIZE(client); i++) { + if (!client[i].ctx) + break; + + i915_vma_unpin_and_release(&client[i].scratch[1], 0); + i915_vma_unpin_and_release(&client[i].scratch[0], 0); + kernel_context_close(client[i].ctx); + } + + if (igt_flush_test(gt->i915)) + err = -EIO; + + return err; +} + +static bool +verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists, + const char *str) +{ + struct drm_i915_private *i915 = ctx->i915; + struct i915_gem_engines_iter it; + struct intel_context *ce; + bool ok = true; + + ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str); + + for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) { + enum intel_engine_id id = ce->engine->id; + + ok &= engine_wa_list_verify(ce, + &lists->engine[id].wa_list, + str) == 0; + + ok &= engine_wa_list_verify(ce, + &lists->engine[id].ctx_wa_list, + str) == 0; + } + + return ok; +} + +static int +live_gpu_reset_workarounds(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_context *ctx; + intel_wakeref_t wakeref; + struct wa_lists lists; + bool ok; + + if (!intel_has_gpu_reset(gt)) + return 0; + + ctx = kernel_context(gt->i915); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + i915_gem_context_lock_engines(ctx); + + pr_info("Verifying after GPU reset...\n"); + + igt_global_reset_lock(gt); + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + + reference_lists_init(gt, &lists); + + ok = verify_wa_lists(ctx, &lists, "before reset"); + if (!ok) + goto out; + + intel_gt_reset(gt, ALL_ENGINES, "live_workarounds"); + + ok = verify_wa_lists(ctx, &lists, "after reset"); + +out: + 
i915_gem_context_unlock_engines(ctx); + kernel_context_close(ctx); + reference_lists_fini(gt, &lists); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + igt_global_reset_unlock(gt); + + return ok ? 0 : -ESRCH; +} + +static int +live_engine_reset_workarounds(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_engines_iter it; + struct i915_gem_context *ctx; + struct intel_context *ce; + struct igt_spinner spin; + struct i915_request *rq; + intel_wakeref_t wakeref; + struct wa_lists lists; + int ret = 0; + + if (!intel_has_reset_engine(gt)) + return 0; + + ctx = kernel_context(gt->i915); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + igt_global_reset_lock(gt); + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + + reference_lists_init(gt, &lists); + + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + struct intel_engine_cs *engine = ce->engine; + bool ok; + + pr_info("Verifying after %s reset...\n", engine->name); + + ok = verify_wa_lists(ctx, &lists, "before reset"); + if (!ok) { + ret = -ESRCH; + goto err; + } + + intel_engine_reset(engine, "live_workarounds"); + + ok = verify_wa_lists(ctx, &lists, "after idle reset"); + if (!ok) { + ret = -ESRCH; + goto err; + } + + ret = igt_spinner_init(&spin, engine->gt); + if (ret) + goto err; + + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + igt_spinner_fini(&spin); + goto err; + } + + ret = request_add_spin(rq, &spin); + if (ret) { + pr_err("Spinner failed to start\n"); + igt_spinner_fini(&spin); + goto err; + } + + intel_engine_reset(engine, "live_workarounds"); + + igt_spinner_end(&spin); + igt_spinner_fini(&spin); + + ok = verify_wa_lists(ctx, &lists, "after busy reset"); + if (!ok) { + ret = -ESRCH; + goto err; + } + } +err: + i915_gem_context_unlock_engines(ctx); + reference_lists_fini(gt, &lists); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + igt_global_reset_unlock(gt); + kernel_context_close(ctx); + + igt_flush_test(gt->i915); + + return ret; +} + +int intel_workarounds_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_dirty_whitelist), + SUBTEST(live_reset_whitelist), + SUBTEST(live_isolated_whitelist), + SUBTEST(live_gpu_reset_workarounds), + SUBTEST(live_engine_reset_workarounds), + }; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return intel_gt_live_subtests(tests, &i915->gt); +} diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c new file mode 100644 index 000000000000..2a77c051f36a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2017-2018 Intel Corporation + */ + +#include "../intel_timeline.h" + +#include "mock_timeline.h" + +void mock_timeline_init(struct intel_timeline *timeline, u64 context) +{ + timeline->gt = NULL; + timeline->fence_context = context; + + mutex_init(&timeline->mutex); + + INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex); + INIT_LIST_HEAD(&timeline->requests); + + i915_syncmap_init(&timeline->sync); + + INIT_LIST_HEAD(&timeline->link); +} + +void mock_timeline_fini(struct intel_timeline *timeline) +{ + i915_syncmap_free(&timeline->sync); +} diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h new file mode 100644 index 000000000000..689efc66c908 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h @@ -0,0 +1,15 @@ 
+/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2017-2018 Intel Corporation + */ + +#ifndef __MOCK_TIMELINE__ +#define __MOCK_TIMELINE__ + +struct intel_timeline; + +void mock_timeline_init(struct intel_timeline *timeline, u64 context); +void mock_timeline_fini(struct intel_timeline *timeline); + +#endif /* !__MOCK_TIMELINE__ */ diff --git a/drivers/gpu/drm/i915/gt/uc/Makefile b/drivers/gpu/drm/i915/gt/uc/Makefile new file mode 100644 index 000000000000..bec94d434cb6 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/Makefile @@ -0,0 +1,5 @@ +# For building individual subdir files on the command line +subdir-ccflags-y += -I$(srctree)/$(src)/../.. + +# Extra header tests +header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c new file mode 100644 index 000000000000..3ee4a4e7689d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -0,0 +1,706 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#include "gt/intel_gt.h" +#include "gt/intel_gt_irq.h" +#include "gt/intel_gt_pm_irq.h" +#include "intel_guc.h" +#include "intel_guc_ads.h" +#include "intel_guc_submission.h" +#include "i915_drv.h" + +/** + * DOC: GuC + * + * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is + * designed to offload some of the functionality usually performed by the host + * driver; currently the main operations it can take care of are: + * + * - Authentication of the HuC, which is required to fully enable HuC usage. + * - Low latency graphics context scheduling (a.k.a. GuC submission). + * - GT Power management. + * + * The enable_guc module parameter can be used to select which of those + * operations to enable within GuC. Note that not all the operations are + * supported on all gen9+ platforms. + * + * Enabling the GuC is not mandatory and therefore the firmware is only loaded + * if at least one of the operations is selected. However, not loading the GuC + * might result in the loss of some features that do require the GuC (currently + * just the HuC, but more are expected to land in the future). 
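The two functions that follow implement the host-to-GuC doorbell for different hardware generations; intel_guc_init_early() further down installs the appropriate one as guc->notify, and callers raise the interrupt through that pointer. A hypothetical wrapper, not part of this file, shown only to make the indirection explicit:

static inline void example_guc_notify(struct intel_guc *guc)
{
        guc->notify(guc);       /* gen8_guc_raise_irq() or gen11_guc_raise_irq() */
}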
+ */ + +static void gen8_guc_raise_irq(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER); +} + +static void gen11_guc_raise_irq(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0); +} + +static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i) +{ + GEM_BUG_ON(!guc->send_regs.base); + GEM_BUG_ON(!guc->send_regs.count); + GEM_BUG_ON(i >= guc->send_regs.count); + + return _MMIO(guc->send_regs.base + 4 * i); +} + +void intel_guc_init_send_regs(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + enum forcewake_domains fw_domains = 0; + unsigned int i; + + if (INTEL_GEN(gt->i915) >= 11) { + guc->send_regs.base = + i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0)); + guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT; + } else { + guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); + guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN; + BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT); + } + + for (i = 0; i < guc->send_regs.count; i++) { + fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore, + guc_send_reg(guc, i), + FW_REG_READ | FW_REG_WRITE); + } + guc->send_regs.fw_domains = fw_domains; +} + +static void gen9_reset_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + assert_rpm_wakelock_held(>->i915->runtime_pm); + + spin_lock_irq(>->irq_lock); + gen6_gt_pm_reset_iir(gt, gt->pm_guc_events); + spin_unlock_irq(>->irq_lock); +} + +static void gen9_enable_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + assert_rpm_wakelock_held(>->i915->runtime_pm); + + spin_lock_irq(>->irq_lock); + if (!guc->interrupts.enabled) { + WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) & + gt->pm_guc_events); + guc->interrupts.enabled = true; + gen6_gt_pm_enable_irq(gt, gt->pm_guc_events); + } + spin_unlock_irq(>->irq_lock); +} + +static void gen9_disable_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + assert_rpm_wakelock_held(>->i915->runtime_pm); + + spin_lock_irq(>->irq_lock); + guc->interrupts.enabled = false; + + gen6_gt_pm_disable_irq(gt, gt->pm_guc_events); + + spin_unlock_irq(>->irq_lock); + intel_synchronize_irq(gt->i915); + + gen9_reset_guc_interrupts(guc); +} + +static void gen11_reset_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + spin_lock_irq(>->irq_lock); + gen11_gt_reset_one_iir(gt, 0, GEN11_GUC); + spin_unlock_irq(>->irq_lock); +} + +static void gen11_enable_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + spin_lock_irq(>->irq_lock); + if (!guc->interrupts.enabled) { + u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); + + WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC)); + intel_uncore_write(gt->uncore, + GEN11_GUC_SG_INTR_ENABLE, events); + intel_uncore_write(gt->uncore, + GEN11_GUC_SG_INTR_MASK, ~events); + guc->interrupts.enabled = true; + } + spin_unlock_irq(>->irq_lock); +} + +static void gen11_disable_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + spin_lock_irq(>->irq_lock); + guc->interrupts.enabled = false; + + intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0); + intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0); + + spin_unlock_irq(>->irq_lock); + intel_synchronize_irq(gt->i915); + + gen11_reset_guc_interrupts(guc); +} + +void 
intel_guc_init_early(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + + intel_guc_fw_init_early(guc); + intel_guc_ct_init_early(&guc->ct); + intel_guc_log_init_early(&guc->log); + intel_guc_submission_init_early(guc); + + mutex_init(&guc->send_mutex); + spin_lock_init(&guc->irq_lock); + guc->send = intel_guc_send_nop; + guc->handler = intel_guc_to_host_event_handler_nop; + if (INTEL_GEN(i915) >= 11) { + guc->notify = gen11_guc_raise_irq; + guc->interrupts.reset = gen11_reset_guc_interrupts; + guc->interrupts.enable = gen11_enable_guc_interrupts; + guc->interrupts.disable = gen11_disable_guc_interrupts; + } else { + guc->notify = gen8_guc_raise_irq; + guc->interrupts.reset = gen9_reset_guc_interrupts; + guc->interrupts.enable = gen9_enable_guc_interrupts; + guc->interrupts.disable = gen9_disable_guc_interrupts; + } +} + +static u32 guc_ctl_debug_flags(struct intel_guc *guc) +{ + u32 level = intel_guc_log_get_level(&guc->log); + u32 flags = 0; + + if (!GUC_LOG_LEVEL_IS_VERBOSE(level)) + flags |= GUC_LOG_DISABLED; + else + flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) << + GUC_LOG_VERBOSITY_SHIFT; + + return flags; +} + +static u32 guc_ctl_feature_flags(struct intel_guc *guc) +{ + u32 flags = 0; + + if (!intel_guc_is_submission_supported(guc)) + flags |= GUC_CTL_DISABLE_SCHEDULER; + + return flags; +} + +static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc) +{ + u32 flags = 0; + + if (intel_guc_is_submission_supported(guc)) { + u32 ctxnum, base; + + base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool); + ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16; + + base >>= PAGE_SHIFT; + flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) | + (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT); + } + return flags; +} + +static u32 guc_ctl_log_params_flags(struct intel_guc *guc) +{ + u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT; + u32 flags; + + #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0) + #define UNIT SZ_1M + #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE + #else + #define UNIT SZ_4K + #define FLAG 0 + #endif + + BUILD_BUG_ON(!CRASH_BUFFER_SIZE); + BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT)); + BUILD_BUG_ON(!DPC_BUFFER_SIZE); + BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT)); + BUILD_BUG_ON(!ISR_BUFFER_SIZE); + BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT)); + + BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) > + (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT)); + BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) > + (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT)); + BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) > + (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT)); + + flags = GUC_LOG_VALID | + GUC_LOG_NOTIFY_ON_HALF_FULL | + FLAG | + ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) | + ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) | + ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) | + (offset << GUC_LOG_BUF_ADDR_SHIFT); + + #undef UNIT + #undef FLAG + + return flags; +} + +static u32 guc_ctl_ads_flags(struct intel_guc *guc) +{ + u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT; + u32 flags = ads << GUC_ADS_ADDR_SHIFT; + + return flags; +} + +/* + * Initialise the GuC parameter block before starting the firmware + * transfer. These parameters are read by the firmware on startup + * and cannot be changed thereafter. 
+ */ +static void guc_init_params(struct intel_guc *guc) +{ + u32 *params = guc->params; + int i; + + BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32)); + + params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); + params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc); + params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc); + params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc); + params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc); + + for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) + DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]); +} + +/* + * Initialise the GuC parameter block before starting the firmware + * transfer. These parameters are read by the firmware on startup + * and cannot be changed thereafter. + */ +void intel_guc_write_params(struct intel_guc *guc) +{ + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; + int i; + + /* + * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and + * they are power context saved so it's ok to release forcewake + * when we are done here and take it again at xfer time. + */ + intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER); + + intel_uncore_write(uncore, SOFT_SCRATCH(0), 0); + + for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) + intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]); + + intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER); +} + +int intel_guc_init(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + int ret; + + ret = intel_uc_fw_init(&guc->fw); + if (ret) + goto err_fetch; + + ret = intel_guc_log_create(&guc->log); + if (ret) + goto err_fw; + + ret = intel_guc_ads_create(guc); + if (ret) + goto err_log; + GEM_BUG_ON(!guc->ads_vma); + + ret = intel_guc_ct_init(&guc->ct); + if (ret) + goto err_ads; + + if (intel_guc_is_submission_supported(guc)) { + /* + * This is stuff we need to have available at fw load time + * if we are planning to enable submission later + */ + ret = intel_guc_submission_init(guc); + if (ret) + goto err_ct; + } + + /* now that everything is perma-pinned, initialize the parameters */ + guc_init_params(guc); + + /* We need to notify the guc whenever we change the GGTT */ + i915_ggtt_enable_guc(gt->ggtt); + + return 0; + +err_ct: + intel_guc_ct_fini(&guc->ct); +err_ads: + intel_guc_ads_destroy(guc); +err_log: + intel_guc_log_destroy(&guc->log); +err_fw: + intel_uc_fw_fini(&guc->fw); +err_fetch: + intel_uc_fw_cleanup_fetch(&guc->fw); + DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret); + return ret; +} + +void intel_guc_fini(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + if (!intel_uc_fw_is_available(&guc->fw)) + return; + + i915_ggtt_disable_guc(gt->ggtt); + + if (intel_guc_is_submission_supported(guc)) + intel_guc_submission_fini(guc); + + intel_guc_ct_fini(&guc->ct); + + intel_guc_ads_destroy(guc); + intel_guc_log_destroy(&guc->log); + intel_uc_fw_fini(&guc->fw); + intel_uc_fw_cleanup_fetch(&guc->fw); +} + +int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) +{ + WARN(1, "Unexpected send: action=%#x\n", *action); + return -ENODEV; +} + +void intel_guc_to_host_event_handler_nop(struct intel_guc *guc) +{ + WARN(1, "Unexpected event: no suitable handler\n"); +} + +/* + * This function implements the MMIO based host to GuC interface. 
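+ *
+ * Callers pass an array of dwords whose first element is the action code. As
+ * a sketch, the CT buffer registration in intel_guc_ct.c uses it roughly like
+ * this (desc_addr and type are names from that file)::
+ *
+ *	u32 action[] = {
+ *		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
+ *		desc_addr,
+ *		sizeof(struct guc_ct_buffer_desc),
+ *		type
+ *	};
+ *
+ *	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);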
+ */ +int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) +{ + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; + u32 status; + int i; + int ret; + + GEM_BUG_ON(!len); + GEM_BUG_ON(len > guc->send_regs.count); + + /* We expect only action code */ + GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK); + + /* If CT is available, we expect to use MMIO only during init/fini */ + GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER && + *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER); + + mutex_lock(&guc->send_mutex); + intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains); + + for (i = 0; i < len; i++) + intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]); + + intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1)); + + intel_guc_notify(guc); + + /* + * No GuC command should ever take longer than 10ms. + * Fast commands should still complete in 10us. + */ + ret = __intel_wait_for_register_fw(uncore, + guc_send_reg(guc, 0), + INTEL_GUC_MSG_TYPE_MASK, + INTEL_GUC_MSG_TYPE_RESPONSE << + INTEL_GUC_MSG_TYPE_SHIFT, + 10, 10, &status); + /* If GuC explicitly returned an error, convert it to -EIO */ + if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status)) + ret = -EIO; + + if (ret) { + DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n", + action[0], ret, status); + goto out; + } + + if (response_buf) { + int count = min(response_buf_size, guc->send_regs.count - 1); + + for (i = 0; i < count; i++) + response_buf[i] = intel_uncore_read(uncore, + guc_send_reg(guc, i + 1)); + } + + /* Use data from the GuC response as our return value */ + ret = INTEL_GUC_MSG_TO_DATA(status); + +out: + intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains); + mutex_unlock(&guc->send_mutex); + + return ret; +} + +int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, + const u32 *payload, u32 len) +{ + u32 msg; + + if (unlikely(!len)) + return -EPROTO; + + /* Make sure to handle only enabled messages */ + msg = payload[0] & guc->msg_enabled_mask; + + if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | + INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)) + intel_guc_log_handle_flush_event(&guc->log); + + return 0; +} + +int intel_guc_sample_forcewake(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; + u32 action[2]; + + action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE; + /* WaRsDisableCoarsePowerGating:skl,cnl */ + if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) + action[1] = 0; + else + /* bit 0 and 1 are for Render and Media domain separately */ + action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +/** + * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode + * @guc: intel_guc structure + * @rsa_offset: rsa offset w.r.t ggtt base of huc vma + * + * Triggers a HuC firmware authentication request to the GuC via intel_guc_send + * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by + * intel_huc_auth(). 
+ * + * Return: non-zero code on error + */ +int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset) +{ + u32 action[] = { + INTEL_GUC_ACTION_AUTHENTICATE_HUC, + rsa_offset + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +/** + * intel_guc_suspend() - notify GuC entering suspend state + * @guc: the guc + */ +int intel_guc_suspend(struct intel_guc *guc) +{ + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; + int ret; + u32 status; + u32 action[] = { + INTEL_GUC_ACTION_ENTER_S_STATE, + GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */ + }; + + /* + * If GuC communication is enabled but submission is not supported, + * we do not need to suspend the GuC. + */ + if (!intel_guc_submission_is_enabled(guc)) + return 0; + + /* + * The ENTER_S_STATE action queues the save/restore operation in GuC FW + * and then returns, so waiting on the H2G is not enough to guarantee + * GuC is done. When all the processing is done, GuC writes + * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll + * on that. Note that GuC does not ensure that the value in the register + * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is + * in progress so we need to take care of that ourselves as well. + */ + + intel_uncore_write(uncore, SOFT_SCRATCH(14), + INTEL_GUC_SLEEP_STATE_INVALID_MASK); + + ret = intel_guc_send(guc, action, ARRAY_SIZE(action)); + if (ret) + return ret; + + ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14), + INTEL_GUC_SLEEP_STATE_INVALID_MASK, + 0, 0, 10, &status); + if (ret) + return ret; + + if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) { + DRM_ERROR("GuC failed to change sleep state. " + "action=0x%x, err=%u\n", + action[0], status); + return -EIO; + } + + return 0; +} + +/** + * intel_guc_reset_engine() - ask GuC to reset an engine + * @guc: intel_guc structure + * @engine: engine to be reset + */ +int intel_guc_reset_engine(struct intel_guc *guc, + struct intel_engine_cs *engine) +{ + /* XXX: to be implemented with submission interface rework */ + + return -ENODEV; +} + +/** + * intel_guc_resume() - notify GuC resuming from suspend state + * @guc: the guc + */ +int intel_guc_resume(struct intel_guc *guc) +{ + u32 action[] = { + INTEL_GUC_ACTION_EXIT_S_STATE, + GUC_POWER_D0, + }; + + /* + * If GuC communication is enabled but submission is not supported, + * we do not need to resume the GuC but we do need to enable the + * GuC communication on resume (above). + */ + if (!intel_guc_submission_is_enabled(guc)) + return 0; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +/** + * DOC: GuC Memory Management + * + * GuC can't allocate any memory for its own usage, so all the allocations must + * be handled by the host driver. GuC accesses the memory via the GGTT, with the + * exception of the top and bottom parts of the 4GB address space, which are + * instead re-mapped by the GuC HW to memory location of the FW itself (WOPCM) + * or other parts of the HW. The driver must take care not to place objects that + * the GuC is going to access in these reserved ranges. 
The layout of the GuC + * address space is shown below: + * + * :: + * + * +===========> +====================+ <== FFFF_FFFF + * ^ | Reserved | + * | +====================+ <== GUC_GGTT_TOP + * | | | + * | | DRAM | + * GuC | | + * Address +===> +====================+ <== GuC ggtt_pin_bias + * Space ^ | | + * | | | | + * | GuC | GuC | + * | WOPCM | WOPCM | + * | Size | | + * | | | | + * v v | | + * +=======+===> +====================+ <== 0000_0000 + * + * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM + * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped + * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size. + */ + +/** + * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage + * @guc: the guc + * @size: size of area to allocate (both virtual space and memory) + * + * This is a wrapper to create an object for use with the GuC. In order to + * use it inside the GuC, an object needs to be pinned lifetime, so we allocate + * both some backing storage and a range inside the Global GTT. We must pin + * it in the GGTT somewhere other than than [0, GUC ggtt_pin_bias) because that + * range is reserved inside GuC. + * + * Return: A i915_vma if successful, otherwise an ERR_PTR. + */ +struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u64 flags; + int ret; + + obj = i915_gem_object_create_shmem(gt->i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) + goto err; + + flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + ret = i915_vma_pin(vma, 0, 0, flags); + if (ret) { + vma = ERR_PTR(ret); + goto err; + } + + return i915_vma_make_unshrinkable(vma); + +err: + i915_gem_object_put(obj); + return vma; +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h new file mode 100644 index 000000000000..e6400204a2bd --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#ifndef _INTEL_GUC_H_ +#define _INTEL_GUC_H_ + +#include "intel_uncore.h" +#include "intel_guc_fw.h" +#include "intel_guc_fwif.h" +#include "intel_guc_ct.h" +#include "intel_guc_log.h" +#include "intel_guc_reg.h" +#include "intel_uc_fw.h" +#include "i915_utils.h" +#include "i915_vma.h" + +struct __guc_ads_blob; + +/* + * Top level structure of GuC. It handles firmware loading and manages client + * pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy + * ExecList submission. 
+ */ +struct intel_guc { + struct intel_uc_fw fw; + struct intel_guc_log log; + struct intel_guc_ct ct; + + /* intel_guc_recv interrupt related state */ + spinlock_t irq_lock; + unsigned int msg_enabled_mask; + + struct { + bool enabled; + void (*reset)(struct intel_guc *guc); + void (*enable)(struct intel_guc *guc); + void (*disable)(struct intel_guc *guc); + } interrupts; + + bool submission_supported; + + struct i915_vma *ads_vma; + struct __guc_ads_blob *ads_blob; + + struct i915_vma *stage_desc_pool; + void *stage_desc_pool_vaddr; + struct ida stage_ids; + + struct intel_guc_client *execbuf_client; + + DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS); + /* Cyclic counter mod pagesize */ + u32 db_cacheline; + + /* Control params for fw initialization */ + u32 params[GUC_CTL_MAX_DWORDS]; + + /* GuC's FW specific registers used in MMIO send */ + struct { + u32 base; + unsigned int count; + enum forcewake_domains fw_domains; + } send_regs; + + /* Store msg (e.g. log flush) that we see while CTBs are disabled */ + u32 mmio_msg; + + /* To serialize the intel_guc_send actions */ + struct mutex send_mutex; + + /* GuC's FW specific send function */ + int (*send)(struct intel_guc *guc, const u32 *data, u32 len, + u32 *response_buf, u32 response_buf_size); + + /* GuC's FW specific event handler function */ + void (*handler)(struct intel_guc *guc); + + /* GuC's FW specific notify function */ + void (*notify)(struct intel_guc *guc); +}; + +static +inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) +{ + return guc->send(guc, action, len, NULL, 0); +} + +static inline int +intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) +{ + return guc->send(guc, action, len, response_buf, response_buf_size); +} + +static inline void intel_guc_notify(struct intel_guc *guc) +{ + guc->notify(guc); +} + +static inline void intel_guc_to_host_event_handler(struct intel_guc *guc) +{ + guc->handler(guc); +} + +/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */ +#define GUC_GGTT_TOP 0xFEE00000 + +/** + * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma + * @guc: intel_guc structure. + * @vma: i915 graphics virtual memory area. + * + * GuC does not allow any gfx GGTT address that falls into range + * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM. + * Currently, in order to exclude [0, ggtt.pin_bias) address space from + * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma() + * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias. + * + * Return: GGTT offset of the @vma. 
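+ *
+ * (Illustrative use, mirroring the ADS setup in intel_guc_ads.c, where all
+ * sub-buffer addresses are derived from one validated base)::
+ *
+ *	base = intel_guc_ggtt_offset(guc, guc->ads_vma);
+ *	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);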
+ */ +static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, + struct i915_vma *vma) +{ + u32 offset = i915_ggtt_offset(vma); + + GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma)); + GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP)); + + return offset; +} + +void intel_guc_init_early(struct intel_guc *guc); +void intel_guc_init_send_regs(struct intel_guc *guc); +void intel_guc_write_params(struct intel_guc *guc); +int intel_guc_init(struct intel_guc *guc); +void intel_guc_fini(struct intel_guc *guc); +int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size); +int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size); +void intel_guc_to_host_event_handler(struct intel_guc *guc); +void intel_guc_to_host_event_handler_nop(struct intel_guc *guc); +int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, + const u32 *payload, u32 len); +int intel_guc_sample_forcewake(struct intel_guc *guc); +int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); +int intel_guc_suspend(struct intel_guc *guc); +int intel_guc_resume(struct intel_guc *guc); +struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); + +static inline bool intel_guc_is_supported(struct intel_guc *guc) +{ + return intel_uc_fw_is_supported(&guc->fw); +} + +static inline bool intel_guc_is_enabled(struct intel_guc *guc) +{ + return intel_uc_fw_is_enabled(&guc->fw); +} + +static inline bool intel_guc_is_running(struct intel_guc *guc) +{ + return intel_uc_fw_is_running(&guc->fw); +} + +static inline int intel_guc_sanitize(struct intel_guc *guc) +{ + intel_uc_fw_sanitize(&guc->fw); + guc->mmio_msg = 0; + + return 0; +} + +static inline bool intel_guc_is_submission_supported(struct intel_guc *guc) +{ + return guc->submission_supported; +} + +static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask) +{ + spin_lock_irq(&guc->irq_lock); + guc->msg_enabled_mask |= mask; + spin_unlock_irq(&guc->irq_lock); +} + +static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask) +{ + spin_lock_irq(&guc->irq_lock); + guc->msg_enabled_mask &= ~mask; + spin_unlock_irq(&guc->irq_lock); +} + +int intel_guc_reset_engine(struct intel_guc *guc, + struct intel_engine_cs *engine); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c new file mode 100644 index 000000000000..ca6674b8e00c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#include "gt/intel_gt.h" +#include "intel_guc_ads.h" +#include "intel_uc.h" +#include "i915_drv.h" + +/* + * The Additional Data Struct (ADS) has pointers for different buffers used by + * the GuC. One single gem object contains the ADS struct itself (guc_ads), the + * scheduling policies (guc_policies), a structure describing a collection of + * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save + * its internal state for sleep. 
+ */ + +static void guc_policy_init(struct guc_policy *policy) +{ + policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US; + policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US; + policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US; + policy->policy_flags = 0; +} + +static void guc_policies_init(struct guc_policies *policies) +{ + struct guc_policy *policy; + u32 p, i; + + policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US; + policies->max_num_work_items = POLICY_MAX_NUM_WI; + + for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) { + for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++) { + policy = &policies->policy[p][i]; + + guc_policy_init(policy); + } + } + + policies->is_valid = 1; +} + +static void guc_ct_pool_entries_init(struct guc_ct_pool_entry *pool, u32 num) +{ + memset(pool, 0, num * sizeof(*pool)); +} + +/* + * The first 80 dwords of the register state context, containing the + * execlists and ppgtt registers. + */ +#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32)) + +/* The ads obj includes the struct itself and buffers passed to GuC */ +struct __guc_ads_blob { + struct guc_ads ads; + struct guc_policies policies; + struct guc_mmio_reg_state reg_state; + struct guc_gt_system_info system_info; + struct guc_clients_info clients_info; + struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE]; + u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE]; +} __packed; + +static void __guc_ads_init(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; + struct __guc_ads_blob *blob = guc->ads_blob; + const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; + u32 base; + u8 engine_class; + + /* GuC scheduling policies */ + guc_policies_init(&blob->policies); + + /* + * GuC expects a per-engine-class context image and size + * (minus hwsp and ring context). The context image will be + * used to reinitialize engines after a reset. It must exist + * and be pinned in the GGTT, so that the address won't change after + * we have told GuC where to find it. The context size will be used + * to validate that the LRC base + size fall within allowed GGTT. + */ + for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) { + if (engine_class == OTHER_CLASS) + continue; + /* + * TODO: Set context pointer to default state to allow + * GuC to re-init guilty contexts after internal reset. 
+ */ + blob->ads.golden_context_lrca[engine_class] = 0; + blob->ads.eng_state_size[engine_class] = + intel_engine_context_size(dev_priv, engine_class) - + skipped_size; + } + + /* System info */ + blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask); + blob->system_info.rcs_enabled = 1; + blob->system_info.bcs_enabled = 1; + + blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv); + blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv); + blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; + + base = intel_guc_ggtt_offset(guc, guc->ads_vma); + + /* Clients info */ + guc_ct_pool_entries_init(blob->ct_pool, ARRAY_SIZE(blob->ct_pool)); + + blob->clients_info.clients_num = 1; + blob->clients_info.ct_pool_addr = base + ptr_offset(blob, ct_pool); + blob->clients_info.ct_pool_count = ARRAY_SIZE(blob->ct_pool); + + /* ADS */ + blob->ads.scheduler_policies = base + ptr_offset(blob, policies); + blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer); + blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state); + blob->ads.gt_system_info = base + ptr_offset(blob, system_info); + blob->ads.clients_info = base + ptr_offset(blob, clients_info); + + i915_gem_object_flush_map(guc->ads_vma->obj); +} + +/** + * intel_guc_ads_create() - allocates and initializes GuC ADS. + * @guc: intel_guc struct + * + * GuC needs memory block (Additional Data Struct), where it will store + * some data. Allocate and initialize such memory block for GuC use. + */ +int intel_guc_ads_create(struct intel_guc *guc) +{ + const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob)); + struct i915_vma *vma; + void *blob; + int ret; + + GEM_BUG_ON(guc->ads_vma); + + vma = intel_guc_allocate_vma(guc, size); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(blob)) { + ret = PTR_ERR(blob); + goto err_vma; + } + + guc->ads_vma = vma; + guc->ads_blob = blob; + + __guc_ads_init(guc); + + return 0; + +err_vma: + i915_vma_unpin_and_release(&guc->ads_vma, 0); + return ret; +} + +void intel_guc_ads_destroy(struct intel_guc *guc) +{ + i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP); +} + +/** + * intel_guc_ads_reset() - prepares GuC Additional Data Struct for reuse + * @guc: intel_guc struct + * + * GuC stores some data in ADS, which might be stale after a reset. + * Reinitialize whole ADS in case any part of it was corrupted during + * previous GuC run. 
+ */ +void intel_guc_ads_reset(struct intel_guc *guc) +{ + if (!guc->ads_vma) + return; + __guc_ads_init(guc); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h new file mode 100644 index 000000000000..b00d3ae1113a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#ifndef _INTEL_GUC_ADS_H_ +#define _INTEL_GUC_ADS_H_ + +struct intel_guc; + +int intel_guc_ads_create(struct intel_guc *guc); +void intel_guc_ads_destroy(struct intel_guc *guc); +void intel_guc_ads_reset(struct intel_guc *guc); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c new file mode 100644 index 000000000000..b49115517510 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -0,0 +1,897 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2016-2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_guc_ct.h" + +#ifdef CONFIG_DRM_I915_DEBUG_GUC +#define CT_DEBUG_DRIVER(...) DRM_DEBUG_DRIVER(__VA_ARGS__) +#else +#define CT_DEBUG_DRIVER(...) do { } while (0) +#endif + +struct ct_request { + struct list_head link; + u32 fence; + u32 status; + u32 response_len; + u32 *response_buf; +}; + +struct ct_incoming_request { + struct list_head link; + u32 msg[]; +}; + +enum { CTB_SEND = 0, CTB_RECV = 1 }; + +enum { CTB_OWNER_HOST = 0 }; + +static void ct_incoming_request_worker_func(struct work_struct *w); + +/** + * intel_guc_ct_init_early - Initialize CT state without requiring device access + * @ct: pointer to CT struct + */ +void intel_guc_ct_init_early(struct intel_guc_ct *ct) +{ + /* we're using static channel owners */ + ct->host_channel.owner = CTB_OWNER_HOST; + + spin_lock_init(&ct->lock); + INIT_LIST_HEAD(&ct->pending_requests); + INIT_LIST_HEAD(&ct->incoming_requests); + INIT_WORK(&ct->worker, ct_incoming_request_worker_func); +} + +static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) +{ + return container_of(ct, struct intel_guc, ct); +} + +static inline const char *guc_ct_buffer_type_to_str(u32 type) +{ + switch (type) { + case INTEL_GUC_CT_BUFFER_TYPE_SEND: + return "SEND"; + case INTEL_GUC_CT_BUFFER_TYPE_RECV: + return "RECV"; + default: + return "<invalid>"; + } +} + +static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc, + u32 cmds_addr, u32 size, u32 owner) +{ + CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n", + desc, cmds_addr, size, owner); + memset(desc, 0, sizeof(*desc)); + desc->addr = cmds_addr; + desc->size = size; + desc->owner = owner; +} + +static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc) +{ + CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n", + desc, desc->head, desc->tail); + desc->head = 0; + desc->tail = 0; + desc->is_in_error = 0; +} + +static int guc_action_register_ct_buffer(struct intel_guc *guc, + u32 desc_addr, + u32 type) +{ + u32 action[] = { + INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER, + desc_addr, + sizeof(struct guc_ct_buffer_desc), + type + }; + int err; + + /* Can't use generic send(), CT registration must go over MMIO */ + err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); + if (err) + DRM_ERROR("CT: register %s buffer failed; err=%d\n", + guc_ct_buffer_type_to_str(type), err); + return err; +} + +static int guc_action_deregister_ct_buffer(struct intel_guc *guc, + u32 owner, + u32 type) +{ + u32 action[] = { + 
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER, + owner, + type + }; + int err; + + /* Can't use generic send(), CT deregistration must go over MMIO */ + err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); + if (err) + DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n", + guc_ct_buffer_type_to_str(type), owner, err); + return err; +} + +static int ctch_init(struct intel_guc *guc, + struct intel_guc_ct_channel *ctch) +{ + struct i915_vma *vma; + void *blob; + int err; + int i; + + GEM_BUG_ON(ctch->vma); + + /* We allocate 1 page to hold both descriptors and both buffers. + * ___________..................... + * |desc (SEND)| : + * |___________| PAGE/4 + * :___________....................: + * |desc (RECV)| : + * |___________| PAGE/4 + * :_______________________________: + * |cmds (SEND) | + * | PAGE/4 + * |_______________________________| + * |cmds (RECV) | + * | PAGE/4 + * |_______________________________| + * + * Each message can use a maximum of 32 dwords and we don't expect to + * have more than 1 in flight at any time, so we have enough space. + * Some logic further ahead will rely on the fact that there is only 1 + * page and that it is always mapped, so if the size is changed the + * other code will need updating as well. + */ + + /* allocate vma */ + vma = intel_guc_allocate_vma(guc, PAGE_SIZE); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_out; + } + ctch->vma = vma; + + /* map first page */ + blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(blob)) { + err = PTR_ERR(blob); + goto err_vma; + } + CT_DEBUG_DRIVER("CT: vma base=%#x\n", + intel_guc_ggtt_offset(guc, ctch->vma)); + + /* store pointers to desc and cmds */ + for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) { + GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); + ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i; + ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2; + } + + return 0; + +err_vma: + i915_vma_unpin_and_release(&ctch->vma, 0); +err_out: + CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n", + ctch->owner, err); + return err; +} + +static void ctch_fini(struct intel_guc *guc, + struct intel_guc_ct_channel *ctch) +{ + GEM_BUG_ON(ctch->enabled); + + i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP); +} + +static int ctch_enable(struct intel_guc *guc, + struct intel_guc_ct_channel *ctch) +{ + u32 base; + int err; + int i; + + GEM_BUG_ON(!ctch->vma); + + GEM_BUG_ON(ctch->enabled); + + /* vma should be already allocated and map'ed */ + base = intel_guc_ggtt_offset(guc, ctch->vma); + + /* (re)initialize descriptors + * cmds buffers are in the second half of the blob page + */ + for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) { + GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); + guc_ct_buffer_desc_init(ctch->ctbs[i].desc, + base + PAGE_SIZE/4 * i + PAGE_SIZE/2, + PAGE_SIZE/4, + ctch->owner); + } + + /* register buffers, starting wirh RECV buffer + * descriptors are in first half of the blob + */ + err = guc_action_register_ct_buffer(guc, + base + PAGE_SIZE/4 * CTB_RECV, + INTEL_GUC_CT_BUFFER_TYPE_RECV); + if (unlikely(err)) + goto err_out; + + err = guc_action_register_ct_buffer(guc, + base + PAGE_SIZE/4 * CTB_SEND, + INTEL_GUC_CT_BUFFER_TYPE_SEND); + if (unlikely(err)) + goto err_deregister; + + ctch->enabled = true; + + return 0; + +err_deregister: + guc_action_deregister_ct_buffer(guc, + ctch->owner, + INTEL_GUC_CT_BUFFER_TYPE_RECV); +err_out: + DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err); + return err; +} + +static 
void ctch_disable(struct intel_guc *guc, + struct intel_guc_ct_channel *ctch) +{ + GEM_BUG_ON(!ctch->enabled); + + ctch->enabled = false; + + guc_action_deregister_ct_buffer(guc, + ctch->owner, + INTEL_GUC_CT_BUFFER_TYPE_SEND); + guc_action_deregister_ct_buffer(guc, + ctch->owner, + INTEL_GUC_CT_BUFFER_TYPE_RECV); +} + +static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch) +{ + /* For now it's trivial */ + return ++ctch->next_fence; +} + +/** + * DOC: CTB Host to GuC request + * + * Format of the CTB Host to GuC request message is as follows:: + * + * +------------+---------+---------+---------+---------+ + * | msg[0] | [1] | [2] | ... | [n-1] | + * +------------+---------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+---------+ + * | | 0 | 1 | ... | n | + * +============+=========+=========+=========+=========+ + * | len >= 1 | FENCE | request specific data | + * +------+-----+---------+---------+---------+---------+ + * + * ^-----------------len-------------------^ + */ + +static int ctb_write(struct intel_guc_ct_buffer *ctb, + const u32 *action, + u32 len /* in dwords */, + u32 fence, + bool want_response) +{ + struct guc_ct_buffer_desc *desc = ctb->desc; + u32 head = desc->head / 4; /* in dwords */ + u32 tail = desc->tail / 4; /* in dwords */ + u32 size = desc->size / 4; /* in dwords */ + u32 used; /* in dwords */ + u32 header; + u32 *cmds = ctb->cmds; + unsigned int i; + + GEM_BUG_ON(desc->size % 4); + GEM_BUG_ON(desc->head % 4); + GEM_BUG_ON(desc->tail % 4); + GEM_BUG_ON(tail >= size); + + /* + * tail == head condition indicates empty. GuC FW does not support + * using up the entire buffer to get tail == head meaning full. + */ + if (tail < head) + used = (size - head) + tail; + else + used = tail - head; + + /* make sure there is a space including extra dw for the fence */ + if (unlikely(used + len + 1 >= size)) + return -ENOSPC; + + /* + * Write the message. The format is the following: + * DW0: header (including action code) + * DW1: fence + * DW2+: action data + */ + header = (len << GUC_CT_MSG_LEN_SHIFT) | + (GUC_CT_MSG_WRITE_FENCE_TO_DESC) | + (want_response ? GUC_CT_MSG_SEND_STATUS : 0) | + (action[0] << GUC_CT_MSG_ACTION_SHIFT); + + CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n", + 4, &header, 4, &fence, + 4 * (len - 1), &action[1]); + + cmds[tail] = header; + tail = (tail + 1) % size; + + cmds[tail] = fence; + tail = (tail + 1) % size; + + for (i = 1; i < len; i++) { + cmds[tail] = action[i]; + tail = (tail + 1) % size; + } + + /* now update desc tail (back in bytes) */ + desc->tail = tail * 4; + GEM_BUG_ON(desc->tail > desc->size); + + return 0; +} + +/** + * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update. + * @desc: buffer descriptor + * @fence: response fence + * @status: placeholder for status + * + * Guc will update CT buffer descriptor with new fence and status + * after processing the command identified by the fence. Wait for + * specified fence and then read from the descriptor status of the + * command. + * + * Return: + * * 0 response received (status is valid) + * * -ETIMEDOUT no response within hardcoded timeout + * * -EPROTO no response, CT buffer is in error + */ +static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc, + u32 fence, + u32 *status) +{ + int err; + + /* + * Fast commands should complete in less than 10us, so sample quickly + * up to that length of time, then switch to a slower sleep-wait loop. 
+ * No GuC command should ever take longer than 10ms. + */ +#define done (READ_ONCE(desc->fence) == fence) + err = wait_for_us(done, 10); + if (err) + err = wait_for(done, 10); +#undef done + + if (unlikely(err)) { + DRM_ERROR("CT: fence %u failed; reported fence=%u\n", + fence, desc->fence); + + if (WARN_ON(desc->is_in_error)) { + /* Something went wrong with the messaging, try to reset + * the buffer and hope for the best + */ + guc_ct_buffer_desc_reset(desc); + err = -EPROTO; + } + } + + *status = desc->status; + return err; +} + +/** + * wait_for_ct_request_update - Wait for CT request state update. + * @req: pointer to pending request + * @status: placeholder for status + * + * For each sent request, Guc shall send bac CT response message. + * Our message handler will update status of tracked request once + * response message with given fence is received. Wait here and + * check for valid response status value. + * + * Return: + * * 0 response received (status is valid) + * * -ETIMEDOUT no response within hardcoded timeout + */ +static int wait_for_ct_request_update(struct ct_request *req, u32 *status) +{ + int err; + + /* + * Fast commands should complete in less than 10us, so sample quickly + * up to that length of time, then switch to a slower sleep-wait loop. + * No GuC command should ever take longer than 10ms. + */ +#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status)) + err = wait_for_us(done, 10); + if (err) + err = wait_for(done, 10); +#undef done + + if (unlikely(err)) + DRM_ERROR("CT: fence %u err %d\n", req->fence, err); + + *status = req->status; + return err; +} + +static int ctch_send(struct intel_guc_ct *ct, + struct intel_guc_ct_channel *ctch, + const u32 *action, + u32 len, + u32 *response_buf, + u32 response_buf_size, + u32 *status) +{ + struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND]; + struct guc_ct_buffer_desc *desc = ctb->desc; + struct ct_request request; + unsigned long flags; + u32 fence; + int err; + + GEM_BUG_ON(!ctch->enabled); + GEM_BUG_ON(!len); + GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK); + GEM_BUG_ON(!response_buf && response_buf_size); + + fence = ctch_get_next_fence(ctch); + request.fence = fence; + request.status = 0; + request.response_len = response_buf_size; + request.response_buf = response_buf; + + spin_lock_irqsave(&ct->lock, flags); + list_add_tail(&request.link, &ct->pending_requests); + spin_unlock_irqrestore(&ct->lock, flags); + + err = ctb_write(ctb, action, len, fence, !!response_buf); + if (unlikely(err)) + goto unlink; + + intel_guc_notify(ct_to_guc(ct)); + + if (response_buf) + err = wait_for_ct_request_update(&request, status); + else + err = wait_for_ctb_desc_update(desc, fence, status); + if (unlikely(err)) + goto unlink; + + if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) { + err = -EIO; + goto unlink; + } + + if (response_buf) { + /* There shall be no data in the status */ + WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status)); + /* Return actual response len */ + err = request.response_len; + } else { + /* There shall be no response payload */ + WARN_ON(request.response_len); + /* Return data decoded from the status dword */ + err = INTEL_GUC_MSG_TO_DATA(*status); + } + +unlink: + spin_lock_irqsave(&ct->lock, flags); + list_del(&request.link); + spin_unlock_irqrestore(&ct->lock, flags); + + return err; +} + +/* + * Command Transport (CT) buffer based GuC send function. 
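+ *
+ * Once the CT channel is up this is normally installed as the guc->send
+ * vfunc, so most callers keep going through the intel_guc_send() wrapper; as
+ * a sketch (mirroring intel_guc_auth_huc() in intel_guc.c)::
+ *
+ *	u32 action[] = { INTEL_GUC_ACTION_AUTHENTICATE_HUC, rsa_offset };
+ *
+ *	return intel_guc_send(guc, action, ARRAY_SIZE(action));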
+ */ +int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) +{ + struct intel_guc_ct *ct = &guc->ct; + struct intel_guc_ct_channel *ctch = &ct->host_channel; + u32 status = ~0; /* undefined */ + int ret; + + mutex_lock(&guc->send_mutex); + + ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size, + &status); + if (unlikely(ret < 0)) { + DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n", + action[0], ret, status); + } else if (unlikely(ret)) { + CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n", + action[0], ret, ret); + } + + mutex_unlock(&guc->send_mutex); + return ret; +} + +static inline unsigned int ct_header_get_len(u32 header) +{ + return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK; +} + +static inline unsigned int ct_header_get_action(u32 header) +{ + return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK; +} + +static inline bool ct_header_is_response(u32 header) +{ + return !!(header & GUC_CT_MSG_IS_RESPONSE); +} + +static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data) +{ + struct guc_ct_buffer_desc *desc = ctb->desc; + u32 head = desc->head / 4; /* in dwords */ + u32 tail = desc->tail / 4; /* in dwords */ + u32 size = desc->size / 4; /* in dwords */ + u32 *cmds = ctb->cmds; + s32 available; /* in dwords */ + unsigned int len; + unsigned int i; + + GEM_BUG_ON(desc->size % 4); + GEM_BUG_ON(desc->head % 4); + GEM_BUG_ON(desc->tail % 4); + GEM_BUG_ON(tail >= size); + GEM_BUG_ON(head >= size); + + /* tail == head condition indicates empty */ + available = tail - head; + if (unlikely(available == 0)) + return -ENODATA; + + /* beware of buffer wrap case */ + if (unlikely(available < 0)) + available += size; + CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail); + GEM_BUG_ON(available < 0); + + data[0] = cmds[head]; + head = (head + 1) % size; + + /* message len with header */ + len = ct_header_get_len(data[0]) + 1; + if (unlikely(len > (u32)available)) { + DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n", + 4, data, + 4 * (head + available - 1 > size ? + size - head : available - 1), &cmds[head], + 4 * (head + available - 1 > size ? + available - 1 - size + head : 0), &cmds[0]); + return -EPROTO; + } + + for (i = 1; i < len; i++) { + data[i] = cmds[head]; + head = (head + 1) % size; + } + CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data); + + desc->head = head * 4; + return 0; +} + +/** + * DOC: CTB GuC to Host response + * + * Format of the CTB GuC to Host response message is as follows:: + * + * +------------+---------+---------+---------+---------+---------+ + * | msg[0] | [1] | [2] | [3] | ... | [n-1] | + * +------------+---------+---------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+---------+---------+ + * | | 0 | 1 | 2 | ... 
| n | + * +============+=========+=========+=========+=========+=========+ + * | len >= 2 | FENCE | STATUS | response specific data | + * +------+-----+---------+---------+---------+---------+---------+ + * + * ^-----------------------len-----------------------^ + */ + +static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) +{ + u32 header = msg[0]; + u32 len = ct_header_get_len(header); + u32 msglen = len + 1; /* total message length including header */ + u32 fence; + u32 status; + u32 datalen; + struct ct_request *req; + bool found = false; + + GEM_BUG_ON(!ct_header_is_response(header)); + GEM_BUG_ON(!in_irq()); + + /* Response payload shall at least include fence and status */ + if (unlikely(len < 2)) { + DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg); + return -EPROTO; + } + + fence = msg[1]; + status = msg[2]; + datalen = len - 2; + + /* Format of the status follows RESPONSE message */ + if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) { + DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg); + return -EPROTO; + } + + CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status); + + spin_lock(&ct->lock); + list_for_each_entry(req, &ct->pending_requests, link) { + if (unlikely(fence != req->fence)) { + CT_DEBUG_DRIVER("CT: request %u awaits response\n", + req->fence); + continue; + } + if (unlikely(datalen > req->response_len)) { + DRM_ERROR("CT: response %u too long %*ph\n", + req->fence, 4 * msglen, msg); + datalen = 0; + } + if (datalen) + memcpy(req->response_buf, msg + 3, 4 * datalen); + req->response_len = datalen; + WRITE_ONCE(req->status, status); + found = true; + break; + } + spin_unlock(&ct->lock); + + if (!found) + DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg); + return 0; +} + +static void ct_process_request(struct intel_guc_ct *ct, + u32 action, u32 len, const u32 *payload) +{ + struct intel_guc *guc = ct_to_guc(ct); + int ret; + + CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload); + + switch (action) { + case INTEL_GUC_ACTION_DEFAULT: + ret = intel_guc_to_host_process_recv_msg(guc, payload, len); + if (unlikely(ret)) + goto fail_unexpected; + break; + + default: +fail_unexpected: + DRM_ERROR("CT: unexpected request %x %*ph\n", + action, 4 * len, payload); + break; + } +} + +static bool ct_process_incoming_requests(struct intel_guc_ct *ct) +{ + unsigned long flags; + struct ct_incoming_request *request; + u32 header; + u32 *payload; + bool done; + + spin_lock_irqsave(&ct->lock, flags); + request = list_first_entry_or_null(&ct->incoming_requests, + struct ct_incoming_request, link); + if (request) + list_del(&request->link); + done = !!list_empty(&ct->incoming_requests); + spin_unlock_irqrestore(&ct->lock, flags); + + if (!request) + return true; + + header = request->msg[0]; + payload = &request->msg[1]; + ct_process_request(ct, + ct_header_get_action(header), + ct_header_get_len(header), + payload); + + kfree(request); + return done; +} + +static void ct_incoming_request_worker_func(struct work_struct *w) +{ + struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker); + bool done; + + done = ct_process_incoming_requests(ct); + if (!done) + queue_work(system_unbound_wq, &ct->worker); +} + +/** + * DOC: CTB GuC to Host request + * + * Format of the CTB GuC to Host request message is as follows:: + * + * +------------+---------+---------+---------+---------+---------+ + * | msg[0] | [1] | [2] | [3] | ... 
| [n-1] | + * +------------+---------+---------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+---------+---------+ + * | | 0 | 1 | 2 | ... | n | + * +============+=========+=========+=========+=========+=========+ + * | len | request specific data | + * +------+-----+---------+---------+---------+---------+---------+ + * + * ^-----------------------len-----------------------^ + */ + +static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg) +{ + u32 header = msg[0]; + u32 len = ct_header_get_len(header); + u32 msglen = len + 1; /* total message length including header */ + struct ct_incoming_request *request; + unsigned long flags; + + GEM_BUG_ON(ct_header_is_response(header)); + + request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC); + if (unlikely(!request)) { + DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg); + return 0; /* XXX: -ENOMEM ? */ + } + memcpy(request->msg, msg, 4 * msglen); + + spin_lock_irqsave(&ct->lock, flags); + list_add_tail(&request->link, &ct->incoming_requests); + spin_unlock_irqrestore(&ct->lock, flags); + + queue_work(system_unbound_wq, &ct->worker); + return 0; +} + +static void ct_process_host_channel(struct intel_guc_ct *ct) +{ + struct intel_guc_ct_channel *ctch = &ct->host_channel; + struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV]; + u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */ + int err = 0; + + if (!ctch->enabled) + return; + + do { + err = ctb_read(ctb, msg); + if (err) + break; + + if (ct_header_is_response(msg[0])) + err = ct_handle_response(ct, msg); + else + err = ct_handle_request(ct, msg); + } while (!err); + + if (GEM_WARN_ON(err == -EPROTO)) { + DRM_ERROR("CT: corrupted message detected!\n"); + ctb->desc->is_in_error = 1; + } +} + +/* + * When we're communicating with the GuC over CT, GuC uses events + * to notify us about new messages being posted on the RECV buffer. + */ +void intel_guc_to_host_event_handler_ct(struct intel_guc *guc) +{ + struct intel_guc_ct *ct = &guc->ct; + + ct_process_host_channel(ct); +} + +/** + * intel_guc_ct_init - Init CT communication + * @ct: pointer to CT struct + * + * Allocate memory required for communication via + * the CT channel. + * + * Return: 0 on success, a negative errno code on failure. + */ +int intel_guc_ct_init(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_guc_ct_channel *ctch = &ct->host_channel; + int err; + + err = ctch_init(guc, ctch); + if (unlikely(err)) { + DRM_ERROR("CT: can't open channel %d; err=%d\n", + ctch->owner, err); + return err; + } + + GEM_BUG_ON(!ctch->vma); + return 0; +} + +/** + * intel_guc_ct_fini - Fini CT communication + * @ct: pointer to CT struct + * + * Deallocate memory required for communication via + * the CT channel. + */ +void intel_guc_ct_fini(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_guc_ct_channel *ctch = &ct->host_channel; + + ctch_fini(guc, ctch); +} + +/** + * intel_guc_ct_enable - Enable buffer based command transport. + * @ct: pointer to CT struct + * + * Return: 0 on success, a negative errno code on failure. + */ +int intel_guc_ct_enable(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_guc_ct_channel *ctch = &ct->host_channel; + + if (ctch->enabled) + return 0; + + return ctch_enable(guc, ctch); +} + +/** + * intel_guc_ct_disable - Disable buffer based command transport. 
+ * @ct: pointer to CT struct + */ +void intel_guc_ct_disable(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_guc_ct_channel *ctch = &ct->host_channel; + + if (!ctch->enabled) + return; + + ctch_disable(guc, ctch); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h new file mode 100644 index 000000000000..7c24d83f5c24 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2016-2019 Intel Corporation + */ + +#ifndef _INTEL_GUC_CT_H_ +#define _INTEL_GUC_CT_H_ + +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#include "intel_guc_fwif.h" + +struct i915_vma; +struct intel_guc; + +/** + * DOC: Command Transport (CT). + * + * Buffer based command transport is a replacement for MMIO based mechanism. + * It can be used to perform both host-2-guc and guc-to-host communication. + */ + +/** Represents single command transport buffer. + * + * A single command transport buffer consists of two parts, the header + * record (command transport buffer descriptor) and the actual buffer which + * holds the commands. + * + * @desc: pointer to the buffer descriptor + * @cmds: pointer to the commands buffer + */ +struct intel_guc_ct_buffer { + struct guc_ct_buffer_desc *desc; + u32 *cmds; +}; + +/** Represents pair of command transport buffers. + * + * Buffers go in pairs to allow bi-directional communication. + * To simplify the code we place both of them in the same vma. + * Buffers from the same pair must share unique owner id. + * + * @vma: pointer to the vma with pair of CT buffers + * @ctbs: buffers for sending(0) and receiving(1) commands + * @owner: unique identifier + * @next_fence: fence to be used with next send command + */ +struct intel_guc_ct_channel { + struct i915_vma *vma; + struct intel_guc_ct_buffer ctbs[2]; + u32 owner; + u32 next_fence; + bool enabled; +}; + +/** Holds all command transport channels. 
+ * + * @host_channel: main channel used by the host + */ +struct intel_guc_ct { + struct intel_guc_ct_channel host_channel; + /* other channels are tbd */ + + /** @lock: protects pending requests list */ + spinlock_t lock; + + /** @pending_requests: list of requests waiting for response */ + struct list_head pending_requests; + + /** @incoming_requests: list of incoming requests */ + struct list_head incoming_requests; + + /** @worker: worker for handling incoming requests */ + struct work_struct worker; +}; + +void intel_guc_ct_init_early(struct intel_guc_ct *ct); +int intel_guc_ct_init(struct intel_guc_ct *ct); +void intel_guc_ct_fini(struct intel_guc_ct *ct); +int intel_guc_ct_enable(struct intel_guc_ct *ct); +void intel_guc_ct_disable(struct intel_guc_ct *ct); + +static inline void intel_guc_ct_stop(struct intel_guc_ct *ct) +{ + ct->host_channel.enabled = false; +} + +int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size); +void intel_guc_to_host_event_handler_ct(struct intel_guc *guc); + +#endif /* _INTEL_GUC_CT_H_ */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c new file mode 100644 index 000000000000..5528224448f6 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2014-2019 Intel Corporation + * + * Authors: + * Vinit Azad <[email protected]> + * Ben Widawsky <[email protected]> + * Dave Gordon <[email protected]> + * Alex Dai <[email protected]> + */ + +#include "gt/intel_gt.h" +#include "intel_guc_fw.h" +#include "i915_drv.h" + +/** + * intel_guc_fw_init_early() - initializes GuC firmware struct + * @guc: intel_guc struct + * + * On platforms with GuC selects firmware for uploading + */ +void intel_guc_fw_init_early(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + + intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, HAS_GT_UC(i915), + INTEL_INFO(i915)->platform, INTEL_REVID(i915)); +} + +static void guc_prepare_xfer(struct intel_uncore *uncore) +{ + u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES | + GUC_ENABLE_READ_CACHE_LOGIC | + GUC_ENABLE_MIA_CACHING | + GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | + GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | + GUC_ENABLE_MIA_CLOCK_GATING; + + /* Must program this register before loading the ucode with DMA */ + intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags); + + if (IS_GEN9_LP(uncore->i915)) + intel_uncore_write(uncore, GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE); + else + intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); + + if (IS_GEN(uncore->i915, 9)) { + /* DOP Clock Gating Enable for GuC clocks */ + intel_uncore_rmw(uncore, GEN7_MISCCPCTL, + 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE); + + /* allows for 5us (in 10ns units) before GT can go to RC6 */ + intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF); + } +} + +/* Copy RSA signature from the fw image to HW for verification */ +static void guc_xfer_rsa(struct intel_uc_fw *guc_fw, + struct intel_uncore *uncore) +{ + u32 rsa[UOS_RSA_SCRATCH_COUNT]; + size_t copied; + int i; + + copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa)); + GEM_BUG_ON(copied < sizeof(rsa)); + + for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) + intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]); +} + +/* + * Read the GuC status register (GUC_STATUS) and store it in the + * specified location; then return a boolean indicating whether + * the value matches either of two values 
representing completion + * of the GuC boot process. + * + * This is used for polling the GuC status in a wait_for() + * loop below. + */ +static inline bool guc_ready(struct intel_uncore *uncore, u32 *status) +{ + u32 val = intel_uncore_read(uncore, GUC_STATUS); + u32 uk_val = val & GS_UKERNEL_MASK; + + *status = val; + return (uk_val == GS_UKERNEL_READY) || + ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE)); +} + +static int guc_wait_ucode(struct intel_uncore *uncore) +{ + u32 status; + int ret; + + /* + * Wait for the GuC to start up. + * NB: Docs recommend not using the interrupt for completion. + * Measurements indicate this should take no more than 20ms, so a + * timeout here indicates that the GuC has failed and is unusable. + * (Higher levels of the driver may decide to reset the GuC and + * attempt the ucode load again if this happens.) + */ + ret = wait_for(guc_ready(uncore, &status), 100); + DRM_DEBUG_DRIVER("GuC status %#x\n", status); + + if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) { + DRM_ERROR("GuC firmware signature verification failed\n"); + ret = -ENOEXEC; + } + + if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) { + DRM_ERROR("GuC firmware exception. EIP: %#x\n", + intel_uncore_read(uncore, SOFT_SCRATCH(13))); + ret = -ENXIO; + } + + return ret; +} + +/** + * intel_guc_fw_upload() - load GuC uCode to device + * @guc: intel_guc structure + * + * Called from intel_uc_init_hw() during driver load, resume from sleep and + * after a GPU reset. + * + * The firmware image should have already been fetched into memory, so only + * check that fetch succeeded, and then transfer the image to the h/w. + * + * Return: non-zero code on error + */ +int intel_guc_fw_upload(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct intel_uncore *uncore = gt->uncore; + int ret; + + guc_prepare_xfer(uncore); + + /* + * Note that GuC needs the CSS header plus uKernel code to be copied + * by the DMA engine in one operation, whereas the RSA signature is + * loaded via MMIO. + */ + guc_xfer_rsa(&guc->fw, uncore); + + /* + * Current uCode expects the code to be loaded at 8k; locations below + * this are used for the stack. 
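+	 * (That 8k boundary corresponds to the 0x2000 offset passed to
+	 * intel_uc_fw_upload() just below.)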
+ */ + ret = intel_uc_fw_upload(&guc->fw, gt, 0x2000, UOS_MOVE); + if (ret) + goto out; + + ret = guc_wait_ucode(uncore); + if (ret) + goto out; + + intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_RUNNING); + return 0; + +out: + intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_FAIL); + return ret; +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h new file mode 100644 index 000000000000..b5ab639d7259 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2017-2019 Intel Corporation + */ + +#ifndef _INTEL_GUC_FW_H_ +#define _INTEL_GUC_FW_H_ + +struct intel_guc; + +void intel_guc_fw_init_early(struct intel_guc *guc); +int intel_guc_fw_upload(struct intel_guc *guc); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h new file mode 100644 index 000000000000..a26a85d50209 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -0,0 +1,604 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#ifndef _INTEL_GUC_FWIF_H +#define _INTEL_GUC_FWIF_H + +#include <linux/bits.h> +#include <linux/compiler.h> +#include <linux/types.h> + +#define GUC_CLIENT_PRIORITY_KMD_HIGH 0 +#define GUC_CLIENT_PRIORITY_HIGH 1 +#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2 +#define GUC_CLIENT_PRIORITY_NORMAL 3 +#define GUC_CLIENT_PRIORITY_NUM 4 + +#define GUC_MAX_STAGE_DESCRIPTORS 1024 +#define GUC_INVALID_STAGE_ID GUC_MAX_STAGE_DESCRIPTORS + +#define GUC_RENDER_ENGINE 0 +#define GUC_VIDEO_ENGINE 1 +#define GUC_BLITTER_ENGINE 2 +#define GUC_VIDEOENHANCE_ENGINE 3 +#define GUC_VIDEO_ENGINE2 4 +#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) + +#define GUC_MAX_ENGINE_CLASSES 5 +#define GUC_MAX_INSTANCES_PER_CLASS 16 + +#define GUC_DOORBELL_INVALID 256 + +#define GUC_DB_SIZE (PAGE_SIZE) +#define GUC_WQ_SIZE (PAGE_SIZE * 2) + +/* Work queue item header definitions */ +#define WQ_STATUS_ACTIVE 1 +#define WQ_STATUS_SUSPENDED 2 +#define WQ_STATUS_CMD_ERROR 3 +#define WQ_STATUS_ENGINE_ID_NOT_USED 4 +#define WQ_STATUS_SUSPENDED_FROM_RESET 5 +#define WQ_TYPE_SHIFT 0 +#define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT) +#define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT) +#define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT) +#define WQ_TYPE_NOOP (0x4 << WQ_TYPE_SHIFT) +#define WQ_TARGET_SHIFT 10 +#define WQ_LEN_SHIFT 16 +#define WQ_NO_WCFLUSH_WAIT (1 << 27) +#define WQ_PRESENT_WORKLOAD (1 << 28) + +#define WQ_RING_TAIL_SHIFT 20 +#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ +#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT) + +#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0) +#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1) +#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2) +#define GUC_STAGE_DESC_ATTR_PREEMPT BIT(3) +#define GUC_STAGE_DESC_ATTR_RESET BIT(4) +#define GUC_STAGE_DESC_ATTR_WQLOCKED BIT(5) +#define GUC_STAGE_DESC_ATTR_PCH BIT(6) +#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7) + +/* New GuC control data */ +#define GUC_CTL_CTXINFO 0 +#define GUC_CTL_CTXNUM_IN16_SHIFT 0 +#define GUC_CTL_BASE_ADDR_SHIFT 12 + +#define GUC_CTL_LOG_PARAMS 1 +#define GUC_LOG_VALID (1 << 0) +#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1) +#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3) +#define GUC_LOG_CRASH_SHIFT 4 +#define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT) +#define GUC_LOG_DPC_SHIFT 6 +#define GUC_LOG_DPC_MASK (0x7 << GUC_LOG_DPC_SHIFT) +#define GUC_LOG_ISR_SHIFT 9 +#define GUC_LOG_ISR_MASK (0x7 << 
GUC_LOG_ISR_SHIFT) +#define GUC_LOG_BUF_ADDR_SHIFT 12 + +#define GUC_CTL_WA 2 +#define GUC_CTL_FEATURE 3 +#define GUC_CTL_DISABLE_SCHEDULER (1 << 14) + +#define GUC_CTL_DEBUG 4 +#define GUC_LOG_VERBOSITY_SHIFT 0 +#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT) +#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT) +#define GUC_LOG_VERBOSITY_HIGH (2 << GUC_LOG_VERBOSITY_SHIFT) +#define GUC_LOG_VERBOSITY_ULTRA (3 << GUC_LOG_VERBOSITY_SHIFT) +/* Verbosity range-check limits, without the shift */ +#define GUC_LOG_VERBOSITY_MIN 0 +#define GUC_LOG_VERBOSITY_MAX 3 +#define GUC_LOG_VERBOSITY_MASK 0x0000000f +#define GUC_LOG_DESTINATION_MASK (3 << 4) +#define GUC_LOG_DISABLED (1 << 6) +#define GUC_PROFILE_ENABLED (1 << 7) + +#define GUC_CTL_ADS 5 +#define GUC_ADS_ADDR_SHIFT 1 +#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT) + +#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */ + +/* Work item for submitting workloads into work queue of GuC. */ +struct guc_wq_item { + u32 header; + u32 context_desc; + u32 submit_element_info; + u32 fence_id; +} __packed; + +struct guc_process_desc { + u32 stage_id; + u64 db_base_addr; + u32 head; + u32 tail; + u32 error_offset; + u64 wq_base_addr; + u32 wq_size_bytes; + u32 wq_status; + u32 engine_presence; + u32 priority; + u32 reserved[30]; +} __packed; + +/* engine id and context id is packed into guc_execlist_context.context_id*/ +#define GUC_ELC_CTXID_OFFSET 0 +#define GUC_ELC_ENGINE_OFFSET 29 + +/* The execlist context including software and HW information */ +struct guc_execlist_context { + u32 context_desc; + u32 context_id; + u32 ring_status; + u32 ring_lrca; + u32 ring_begin; + u32 ring_end; + u32 ring_next_free_location; + u32 ring_current_tail_pointer_value; + u8 engine_state_submit_value; + u8 engine_state_wait_value; + u16 pagefault_count; + u16 engine_submit_queue_count; +} __packed; + +/* + * This structure describes a stage set arranged for a particular communication + * between uKernel (GuC) and Driver (KMD). Technically, this is known as a + * "GuC Context descriptor" in the specs, but we use the term "stage descriptor" + * to avoid confusion with all the other things already named "context" in the + * driver. A static pool of these descriptors are stored inside a GEM object + * (stage_desc_pool) which is held for the entire lifetime of our interaction + * with the GuC, being allocated before the GuC is loaded with its firmware. + */ +struct guc_stage_desc { + u32 sched_common_area; + u32 stage_id; + u32 pas_id; + u8 engines_used; + u64 db_trigger_cpu; + u32 db_trigger_uk; + u64 db_trigger_phy; + u16 db_id; + + struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM]; + + u8 attribute; + + u32 priority; + + u32 wq_sampled_tail_offset; + u32 wq_total_submit_enqueues; + + u32 process_desc; + u32 wq_addr; + u32 wq_size; + + u32 engine_presence; + + u8 engine_suspended; + + u8 reserved0[3]; + u64 reserved1[1]; + + u64 desc_private; +} __packed; + +/** + * DOC: CTB based communication + * + * The CTB (command transport buffer) communication between Host and GuC + * is based on u32 data stream written to the shared buffer. One buffer can + * be used to transmit data only in one direction (one-directional channel). + * + * Current status of the each buffer is stored in the buffer descriptor. + * Buffer descriptor holds tail and head fields that represents active data + * stream. 
The tail field is updated by the data producer (sender), and head + * field is updated by the data consumer (receiver):: + * + * +------------+ + * | DESCRIPTOR | +=================+============+========+ + * +============+ | | MESSAGE(s) | | + * | address |--------->+=================+============+========+ + * +------------+ + * | head | ^-----head--------^ + * +------------+ + * | tail | ^---------tail-----------------^ + * +------------+ + * | size | ^---------------size--------------------^ + * +------------+ + * + * Each message in data stream starts with the single u32 treated as a header, + * followed by optional set of u32 data that makes message specific payload:: + * + * +------------+---------+---------+---------+ + * | MESSAGE | + * +------------+---------+---------+---------+ + * | msg[0] | [1] | ... | [n-1] | + * +------------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+ + * | | 0 | ... | n | + * +======+=====+=========+=========+=========+ + * | 31:16| code| | | | + * +------+-----+ | | | + * | 15:5|flags| | | | + * +------+-----+ | | | + * | 4:0| len| | | | + * +------+-----+---------+---------+---------+ + * + * ^-------------len-------------^ + * + * The message header consists of: + * + * - **len**, indicates length of the message payload (in u32) + * - **code**, indicates message code + * - **flags**, holds various bits to control message handling + */ + +/* + * Describes single command transport buffer. + * Used by both guc-master and clients. + */ +struct guc_ct_buffer_desc { + u32 addr; /* gfx address */ + u64 host_private; /* host private data */ + u32 size; /* size in bytes */ + u32 head; /* offset updated by GuC*/ + u32 tail; /* offset updated by owner */ + u32 is_in_error; /* error indicator */ + u32 fence; /* fence updated by GuC */ + u32 status; /* status updated by GuC */ + u32 owner; /* id of the channel owner */ + u32 owner_sub_id; /* owner-defined field for extra tracking */ + u32 reserved[5]; +} __packed; + +/* Type of command transport buffer */ +#define INTEL_GUC_CT_BUFFER_TYPE_SEND 0x0u +#define INTEL_GUC_CT_BUFFER_TYPE_RECV 0x1u + +/* + * Definition of the command transport message header (DW0) + * + * bit[4..0] message len (in dwords) + * bit[7..5] reserved + * bit[8] response (G2H only) + * bit[8] write fence to desc (H2G only) + * bit[9] write status to H2G buff (H2G only) + * bit[10] send status back via G2H (H2G only) + * bit[15..11] reserved + * bit[31..16] action code + */ +#define GUC_CT_MSG_LEN_SHIFT 0 +#define GUC_CT_MSG_LEN_MASK 0x1F +#define GUC_CT_MSG_IS_RESPONSE (1 << 8) +#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8) +#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9) +#define GUC_CT_MSG_SEND_STATUS (1 << 10) +#define GUC_CT_MSG_ACTION_SHIFT 16 +#define GUC_CT_MSG_ACTION_MASK 0xFFFF + +#define GUC_FORCEWAKE_RENDER (1 << 0) +#define GUC_FORCEWAKE_MEDIA (1 << 1) + +#define GUC_POWER_UNSPECIFIED 0 +#define GUC_POWER_D0 1 +#define GUC_POWER_D1 2 +#define GUC_POWER_D2 3 +#define GUC_POWER_D3 4 + +/* Scheduling policy settings */ + +/* Reset engine upon preempt failure */ +#define POLICY_RESET_ENGINE (1<<0) +/* Preempt to idle on quantum expiry */ +#define POLICY_PREEMPT_TO_IDLE (1<<1) + +#define POLICY_MAX_NUM_WI 15 +#define POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000 +#define POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000 +#define POLICY_DEFAULT_PREEMPTION_TIME_US 500000 +#define POLICY_DEFAULT_FAULT_TIME_US 250000 + +struct guc_policy { + /* Time for one workload to execute. 
(in micro seconds) */ + u32 execution_quantum; + /* Time to wait for a preemption request to completed before issuing a + * reset. (in micro seconds). */ + u32 preemption_time; + /* How much time to allow to run after the first fault is observed. + * Then preempt afterwards. (in micro seconds) */ + u32 fault_time; + u32 policy_flags; + u32 reserved[8]; +} __packed; + +struct guc_policies { + struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINE_CLASSES]; + u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES]; + /* In micro seconds. How much time to allow before DPC processing is + * called back via interrupt (to prevent DPC queue drain starving). + * Typically 1000s of micro seconds (example only, not granularity). */ + u32 dpc_promote_time; + + /* Must be set to take these new values. */ + u32 is_valid; + + /* Max number of WIs to process per call. A large value may keep CS + * idle. */ + u32 max_num_work_items; + + u32 reserved[4]; +} __packed; + +/* GuC MMIO reg state struct */ + + +#define GUC_REGSET_MAX_REGISTERS 64 +#define GUC_S3_SAVE_SPACE_PAGES 10 + +struct guc_mmio_reg { + u32 offset; + u32 value; + u32 flags; +#define GUC_REGSET_MASKED (1 << 0) +} __packed; + +struct guc_mmio_regset { + struct guc_mmio_reg registers[GUC_REGSET_MAX_REGISTERS]; + u32 values_valid; + u32 number_of_registers; +} __packed; + +/* GuC register sets */ +struct guc_mmio_reg_state { + struct guc_mmio_regset engine_reg[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; + u32 reserved[98]; +} __packed; + +/* HW info */ +struct guc_gt_system_info { + u32 slice_enabled; + u32 rcs_enabled; + u32 reserved0; + u32 bcs_enabled; + u32 vdbox_enable_mask; + u32 vdbox_sfc_support_mask; + u32 vebox_enable_mask; + u32 reserved[9]; +} __packed; + +/* Clients info */ +struct guc_ct_pool_entry { + struct guc_ct_buffer_desc desc; + u32 reserved[7]; +} __packed; + +#define GUC_CT_POOL_SIZE 2 + +struct guc_clients_info { + u32 clients_num; + u32 reserved0[13]; + u32 ct_pool_addr; + u32 ct_pool_count; + u32 reserved[4]; +} __packed; + +/* GuC Additional Data Struct */ +struct guc_ads { + u32 reg_state_addr; + u32 reg_state_buffer; + u32 scheduler_policies; + u32 gt_system_info; + u32 clients_info; + u32 control_data; + u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES]; + u32 eng_state_size[GUC_MAX_ENGINE_CLASSES]; + u32 reserved[16]; +} __packed; + +/* GuC logging structures */ + +enum guc_log_buffer_type { + GUC_ISR_LOG_BUFFER, + GUC_DPC_LOG_BUFFER, + GUC_CRASH_DUMP_LOG_BUFFER, + GUC_MAX_LOG_BUFFER +}; + +/** + * struct guc_log_buffer_state - GuC log buffer state + * + * Below state structure is used for coordination of retrieval of GuC firmware + * logs. Separate state is maintained for each log buffer type. + * read_ptr points to the location where i915 read last in log buffer and + * is read only for GuC firmware. write_ptr is incremented by GuC with number + * of bytes written for each log entry and is read only for i915. + * When any type of log buffer becomes half full, GuC sends a flush interrupt. + * GuC firmware expects that while it is writing to 2nd half of the buffer, + * first half would get consumed by Host and then get a flush completed + * acknowledgment from Host, so that it does not end up doing any overwrite + * causing loss of logs. So when buffer gets half filled & i915 has requested + * for interrupt, GuC will set flush_to_file field, set the sampled_write_ptr + * to the value of write_ptr and raise the interrupt. 
+ * On receiving the interrupt i915 should read the buffer, clear flush_to_file + * field and also update read_ptr with the value of sample_write_ptr, before + * sending an acknowledgment to GuC. marker & version fields are for internal + * usage of GuC and opaque to i915. buffer_full_cnt field is incremented every + * time GuC detects the log buffer overflow. + */ +struct guc_log_buffer_state { + u32 marker[2]; + u32 read_ptr; + u32 write_ptr; + u32 size; + u32 sampled_write_ptr; + union { + struct { + u32 flush_to_file:1; + u32 buffer_full_cnt:4; + u32 reserved:27; + }; + u32 flags; + }; + u32 version; +} __packed; + +struct guc_ctx_report { + u32 report_return_status; + u32 reserved1[64]; + u32 affected_count; + u32 reserved2[2]; +} __packed; + +/* GuC Shared Context Data Struct */ +struct guc_shared_ctx_data { + u32 addr_of_last_preempted_data_low; + u32 addr_of_last_preempted_data_high; + u32 addr_of_last_preempted_data_high_tmp; + u32 padding; + u32 is_mapped_to_proxy; + u32 proxy_ctx_id; + u32 engine_reset_ctx_id; + u32 media_reset_count; + u32 reserved1[8]; + u32 uk_last_ctx_switch_reason; + u32 was_reset; + u32 lrca_gpu_addr; + u64 execlist_ctx; + u32 reserved2[66]; + struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM]; +} __packed; + +/** + * DOC: MMIO based communication + * + * The MMIO based communication between Host and GuC uses software scratch + * registers, where first register holds data treated as message header, + * and other registers are used to hold message payload. + * + * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8, + * but no H2G command takes more than 8 parameters and the GuC FW + * itself uses an 8-element array to store the H2G message. + * + * +-----------+---------+---------+---------+ + * | MMIO[0] | MMIO[1] | ... | MMIO[n] | + * +-----------+---------+---------+---------+ + * | header | optional payload | + * +======+====+=========+=========+=========+ + * | 31:28|type| | | | + * +------+----+ | | | + * | 27:16|data| | | | + * +------+----+ | | | + * | 15:0|code| | | | + * +------+----+---------+---------+---------+ + * + * The message header consists of: + * + * - **type**, indicates message type + * - **code**, indicates message code, is specific for **type** + * - **data**, indicates message data, optional, depends on **code** + * + * The following message **types** are supported: + * + * - **REQUEST**, indicates Host-to-GuC request, requested GuC action code + * must be priovided in **code** field. Optional action specific parameters + * can be provided in remaining payload registers or **data** field. + * + * - **RESPONSE**, indicates GuC-to-Host response from earlier GuC request, + * action response status will be provided in **code** field. Optional + * response data can be returned in remaining payload registers or **data** + * field. 
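+ *
+ * As a worked example of this encoding: a response value of 0xF0000000
+ * decodes as type RESPONSE (0xF), no data and code
+ * INTEL_GUC_RESPONSE_STATUS_SUCCESS, which is exactly the pattern that
+ * INTEL_GUC_MSG_IS_RESPONSE_SUCCESS() below tests for, while 0xF000F000
+ * carries INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL in its **code** field.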
+ */ + +#define GUC_MAX_MMIO_MSG_LEN 8 + +#define INTEL_GUC_MSG_TYPE_SHIFT 28 +#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT) +#define INTEL_GUC_MSG_DATA_SHIFT 16 +#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT) +#define INTEL_GUC_MSG_CODE_SHIFT 0 +#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT) + +#define __INTEL_GUC_MSG_GET(T, m) \ + (((m) & INTEL_GUC_MSG_ ## T ## _MASK) >> INTEL_GUC_MSG_ ## T ## _SHIFT) +#define INTEL_GUC_MSG_TO_TYPE(m) __INTEL_GUC_MSG_GET(TYPE, m) +#define INTEL_GUC_MSG_TO_DATA(m) __INTEL_GUC_MSG_GET(DATA, m) +#define INTEL_GUC_MSG_TO_CODE(m) __INTEL_GUC_MSG_GET(CODE, m) + +enum intel_guc_msg_type { + INTEL_GUC_MSG_TYPE_REQUEST = 0x0, + INTEL_GUC_MSG_TYPE_RESPONSE = 0xF, +}; + +#define __INTEL_GUC_MSG_TYPE_IS(T, m) \ + (INTEL_GUC_MSG_TO_TYPE(m) == INTEL_GUC_MSG_TYPE_ ## T) +#define INTEL_GUC_MSG_IS_REQUEST(m) __INTEL_GUC_MSG_TYPE_IS(REQUEST, m) +#define INTEL_GUC_MSG_IS_RESPONSE(m) __INTEL_GUC_MSG_TYPE_IS(RESPONSE, m) + +enum intel_guc_action { + INTEL_GUC_ACTION_DEFAULT = 0x0, + INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2, + INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3, + INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10, + INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, + INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, + INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40, + INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302, + INTEL_GUC_ACTION_ENTER_S_STATE = 0x501, + INTEL_GUC_ACTION_EXIT_S_STATE = 0x502, + INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003, + INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x3005, + INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, + INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, + INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, + INTEL_GUC_ACTION_LIMIT +}; + +enum intel_guc_preempt_options { + INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4, + INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8, +}; + +enum intel_guc_report_status { + INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0, + INTEL_GUC_REPORT_STATUS_ACKED = 0x1, + INTEL_GUC_REPORT_STATUS_ERROR = 0x2, + INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4, +}; + +enum intel_guc_sleep_state_status { + INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1, + INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2, + INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3 +#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000 +}; + +#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0) +#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4 +#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) +#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8) + +enum intel_guc_response_status { + INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0, + INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000, +}; + +#define INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(m) \ + (typecheck(u32, (m)) && \ + ((m) & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) == \ + ((INTEL_GUC_MSG_TYPE_RESPONSE << INTEL_GUC_MSG_TYPE_SHIFT) | \ + (INTEL_GUC_RESPONSE_STATUS_SUCCESS << INTEL_GUC_MSG_CODE_SHIFT))) + +/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */ +enum intel_guc_recv_message { + INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1), + INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER = BIT(3) +}; + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c new file mode 100644 index 000000000000..caed0d57e704 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -0,0 +1,674 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#include 
<linux/debugfs.h> + +#include "gt/intel_gt.h" +#include "i915_drv.h" +#include "i915_memcpy.h" +#include "intel_guc_log.h" + +static void guc_log_capture_logs(struct intel_guc_log *log); + +/** + * DOC: GuC firmware log + * + * Firmware log is enabled by setting i915.guc_log_level to the positive level. + * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from + * i915_guc_load_status will print out firmware loading status and scratch + * registers value. + */ + +static int guc_action_flush_log_complete(struct intel_guc *guc) +{ + u32 action[] = { + INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static int guc_action_flush_log(struct intel_guc *guc) +{ + u32 action[] = { + INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH, + 0 + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static int guc_action_control_log(struct intel_guc *guc, bool enable, + bool default_logging, u32 verbosity) +{ + u32 action[] = { + INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING, + (enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) | + (verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) | + (default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0) + }; + + GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX); + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static inline struct intel_guc *log_to_guc(struct intel_guc_log *log) +{ + return container_of(log, struct intel_guc, log); +} + +static void guc_log_enable_flush_events(struct intel_guc_log *log) +{ + intel_guc_enable_msg(log_to_guc(log), + INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | + INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED); +} + +static void guc_log_disable_flush_events(struct intel_guc_log *log) +{ + intel_guc_disable_msg(log_to_guc(log), + INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | + INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED); +} + +/* + * Sub buffer switch callback. Called whenever relay has to switch to a new + * sub buffer, relay stays on the same sub buffer if 0 is returned. + */ +static int subbuf_start_callback(struct rchan_buf *buf, + void *subbuf, + void *prev_subbuf, + size_t prev_padding) +{ + /* + * Use no-overwrite mode by default, where relay will stop accepting + * new data if there are no empty sub buffers left. + * There is no strict synchronization enforced by relay between Consumer + * and Producer. In overwrite mode, there is a possibility of getting + * inconsistent/garbled data, the producer could be writing on to the + * same sub buffer from which Consumer is reading. This can't be avoided + * unless Consumer is fast enough and can always run in tandem with + * Producer. + */ + if (relay_buf_full(buf)) + return 0; + + return 1; +} + +/* + * file_create() callback. Creates relay file in debugfs. + */ +static struct dentry *create_buf_file_callback(const char *filename, + struct dentry *parent, + umode_t mode, + struct rchan_buf *buf, + int *is_global) +{ + struct dentry *buf_file; + + /* + * This to enable the use of a single buffer for the relay channel and + * correspondingly have a single file exposed to User, through which + * it can collect the logs in order without any post-processing. + * Need to set 'is_global' even if parent is NULL for early logging. + */ + *is_global = 1; + + if (!parent) + return NULL; + + buf_file = debugfs_create_file(filename, mode, + parent, buf, &relay_file_operations); + if (IS_ERR(buf_file)) + return NULL; + + return buf_file; +} + +/* + * file_remove() default callback. Removes relay file in debugfs. 
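+ *
+ * Together with the two callbacks above, it is handed to relay through the
+ * relay_callbacks table passed to relay_open() in guc_log_relay_create().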
+ */ +static int remove_buf_file_callback(struct dentry *dentry) +{ + debugfs_remove(dentry); + return 0; +} + +/* relay channel callbacks */ +static struct rchan_callbacks relay_callbacks = { + .subbuf_start = subbuf_start_callback, + .create_buf_file = create_buf_file_callback, + .remove_buf_file = remove_buf_file_callback, +}; + +static void guc_move_to_next_buf(struct intel_guc_log *log) +{ + /* + * Make sure the updates made in the sub buffer are visible when + * Consumer sees the following update to offset inside the sub buffer. + */ + smp_wmb(); + + /* All data has been written, so now move the offset of sub buffer. */ + relay_reserve(log->relay.channel, log->vma->obj->base.size); + + /* Switch to the next sub buffer */ + relay_flush(log->relay.channel); +} + +static void *guc_get_write_buffer(struct intel_guc_log *log) +{ + /* + * Just get the base address of a new sub buffer and copy data into it + * ourselves. NULL will be returned in no-overwrite mode, if all sub + * buffers are full. Could have used the relay_write() to indirectly + * copy the data, but that would have been bit convoluted, as we need to + * write to only certain locations inside a sub buffer which cannot be + * done without using relay_reserve() along with relay_write(). So its + * better to use relay_reserve() alone. + */ + return relay_reserve(log->relay.channel, 0); +} + +static bool guc_check_log_buf_overflow(struct intel_guc_log *log, + enum guc_log_buffer_type type, + unsigned int full_cnt) +{ + unsigned int prev_full_cnt = log->stats[type].sampled_overflow; + bool overflow = false; + + if (full_cnt != prev_full_cnt) { + overflow = true; + + log->stats[type].overflow = full_cnt; + log->stats[type].sampled_overflow += full_cnt - prev_full_cnt; + + if (full_cnt < prev_full_cnt) { + /* buffer_full_cnt is a 4 bit counter */ + log->stats[type].sampled_overflow += 16; + } + + dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev, + "GuC log buffer overflow\n"); + } + + return overflow; +} + +static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type) +{ + switch (type) { + case GUC_ISR_LOG_BUFFER: + return ISR_BUFFER_SIZE; + case GUC_DPC_LOG_BUFFER: + return DPC_BUFFER_SIZE; + case GUC_CRASH_DUMP_LOG_BUFFER: + return CRASH_BUFFER_SIZE; + default: + MISSING_CASE(type); + } + + return 0; +} + +static void guc_read_update_log_buffer(struct intel_guc_log *log) +{ + unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt; + struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state; + struct guc_log_buffer_state log_buf_state_local; + enum guc_log_buffer_type type; + void *src_data, *dst_data; + bool new_overflow; + + mutex_lock(&log->relay.lock); + + if (WARN_ON(!intel_guc_log_relay_created(log))) + goto out_unlock; + + /* Get the pointer to shared GuC log buffer */ + log_buf_state = src_data = log->relay.buf_addr; + + /* Get the pointer to local buffer to store the logs */ + log_buf_snapshot_state = dst_data = guc_get_write_buffer(log); + + if (unlikely(!log_buf_snapshot_state)) { + /* + * Used rate limited to avoid deluge of messages, logs might be + * getting consumed by User at a slow rate. 
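+		 * The dropped snapshot is accounted for in relay.full_count
+		 * below.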
+ */ + DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n"); + log->relay.full_count++; + + goto out_unlock; + } + + /* Actual logs are present from the 2nd page */ + src_data += PAGE_SIZE; + dst_data += PAGE_SIZE; + + for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) { + /* + * Make a copy of the state structure, inside GuC log buffer + * (which is uncached mapped), on the stack to avoid reading + * from it multiple times. + */ + memcpy(&log_buf_state_local, log_buf_state, + sizeof(struct guc_log_buffer_state)); + buffer_size = guc_get_log_buffer_size(type); + read_offset = log_buf_state_local.read_ptr; + write_offset = log_buf_state_local.sampled_write_ptr; + full_cnt = log_buf_state_local.buffer_full_cnt; + + /* Bookkeeping stuff */ + log->stats[type].flush += log_buf_state_local.flush_to_file; + new_overflow = guc_check_log_buf_overflow(log, type, full_cnt); + + /* Update the state of shared log buffer */ + log_buf_state->read_ptr = write_offset; + log_buf_state->flush_to_file = 0; + log_buf_state++; + + /* First copy the state structure in snapshot buffer */ + memcpy(log_buf_snapshot_state, &log_buf_state_local, + sizeof(struct guc_log_buffer_state)); + + /* + * The write pointer could have been updated by GuC firmware, + * after sending the flush interrupt to Host, for consistency + * set write pointer value to same value of sampled_write_ptr + * in the snapshot buffer. + */ + log_buf_snapshot_state->write_ptr = write_offset; + log_buf_snapshot_state++; + + /* Now copy the actual logs. */ + if (unlikely(new_overflow)) { + /* copy the whole buffer in case of overflow */ + read_offset = 0; + write_offset = buffer_size; + } else if (unlikely((read_offset > buffer_size) || + (write_offset > buffer_size))) { + DRM_ERROR("invalid log buffer state\n"); + /* copy whole buffer as offsets are unreliable */ + read_offset = 0; + write_offset = buffer_size; + } + + /* Just copy the newly written data */ + if (read_offset > write_offset) { + i915_memcpy_from_wc(dst_data, src_data, write_offset); + bytes_to_copy = buffer_size - read_offset; + } else { + bytes_to_copy = write_offset - read_offset; + } + i915_memcpy_from_wc(dst_data + read_offset, + src_data + read_offset, bytes_to_copy); + + src_data += buffer_size; + dst_data += buffer_size; + } + + guc_move_to_next_buf(log); + +out_unlock: + mutex_unlock(&log->relay.lock); +} + +static void capture_logs_work(struct work_struct *work) +{ + struct intel_guc_log *log = + container_of(work, struct intel_guc_log, relay.flush_work); + + guc_log_capture_logs(log); +} + +static int guc_log_map(struct intel_guc_log *log) +{ + void *vaddr; + + lockdep_assert_held(&log->relay.lock); + + if (!log->vma) + return -ENODEV; + + /* + * Create a WC (Uncached for read) vmalloc mapping of log + * buffer pages, so that we can directly get the data + * (up-to-date) from memory. 
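+	 * The logs are later copied out of this mapping with
+	 * i915_memcpy_from_wc() in guc_read_update_log_buffer().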
+ */ + vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + log->relay.buf_addr = vaddr; + + return 0; +} + +static void guc_log_unmap(struct intel_guc_log *log) +{ + lockdep_assert_held(&log->relay.lock); + + i915_gem_object_unpin_map(log->vma->obj); + log->relay.buf_addr = NULL; +} + +void intel_guc_log_init_early(struct intel_guc_log *log) +{ + mutex_init(&log->relay.lock); + INIT_WORK(&log->relay.flush_work, capture_logs_work); + log->relay.started = false; +} + +static int guc_log_relay_create(struct intel_guc_log *log) +{ + struct intel_guc *guc = log_to_guc(log); + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; + struct rchan *guc_log_relay_chan; + size_t n_subbufs, subbuf_size; + int ret; + + lockdep_assert_held(&log->relay.lock); + GEM_BUG_ON(!log->vma); + + /* Keep the size of sub buffers same as shared log buffer */ + subbuf_size = log->vma->size; + + /* + * Store up to 8 snapshots, which is large enough to buffer sufficient + * boot time logs and provides enough leeway to User, in terms of + * latency, for consuming the logs from relay. Also doesn't take + * up too much memory. + */ + n_subbufs = 8; + + guc_log_relay_chan = relay_open("guc_log", + dev_priv->drm.primary->debugfs_root, + subbuf_size, n_subbufs, + &relay_callbacks, dev_priv); + if (!guc_log_relay_chan) { + DRM_ERROR("Couldn't create relay chan for GuC logging\n"); + + ret = -ENOMEM; + return ret; + } + + GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size); + log->relay.channel = guc_log_relay_chan; + + return 0; +} + +static void guc_log_relay_destroy(struct intel_guc_log *log) +{ + lockdep_assert_held(&log->relay.lock); + + relay_close(log->relay.channel); + log->relay.channel = NULL; +} + +static void guc_log_capture_logs(struct intel_guc_log *log) +{ + struct intel_guc *guc = log_to_guc(log); + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; + intel_wakeref_t wakeref; + + guc_read_update_log_buffer(log); + + /* + * Generally device is expected to be active only at this + * time, so get/put should be really quick. + */ + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) + guc_action_flush_log_complete(guc); +} + +static u32 __get_default_log_level(struct intel_guc_log *log) +{ + /* A negative value means "use platform/config default" */ + if (i915_modparams.guc_log_level < 0) { + return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || + IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ? + GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE; + } + + if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) { + DRM_WARN("Incompatible option detected: %s=%d, %s!\n", + "guc_log_level", i915_modparams.guc_log_level, + "verbosity too high"); + return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || + IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ? 
+ GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED; + } + + GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED); + GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX); + return i915_modparams.guc_log_level; +} + +int intel_guc_log_create(struct intel_guc_log *log) +{ + struct intel_guc *guc = log_to_guc(log); + struct i915_vma *vma; + u32 guc_log_size; + int ret; + + GEM_BUG_ON(log->vma); + + /* + * GuC Log buffer Layout + * + * +===============================+ 00B + * | Crash dump state header | + * +-------------------------------+ 32B + * | DPC state header | + * +-------------------------------+ 64B + * | ISR state header | + * +-------------------------------+ 96B + * | | + * +===============================+ PAGE_SIZE (4KB) + * | Crash Dump logs | + * +===============================+ + CRASH_SIZE + * | DPC logs | + * +===============================+ + DPC_SIZE + * | ISR logs | + * +===============================+ + ISR_SIZE + */ + guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE + + ISR_BUFFER_SIZE; + + vma = intel_guc_allocate_vma(guc, guc_log_size); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err; + } + + log->vma = vma; + + log->level = __get_default_log_level(log); + DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n", + log->level, enableddisabled(log->level), + yesno(GUC_LOG_LEVEL_IS_VERBOSE(log->level)), + GUC_LOG_LEVEL_TO_VERBOSITY(log->level)); + + return 0; + +err: + DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret); + return ret; +} + +void intel_guc_log_destroy(struct intel_guc_log *log) +{ + i915_vma_unpin_and_release(&log->vma, 0); +} + +int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) +{ + struct intel_guc *guc = log_to_guc(log); + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; + intel_wakeref_t wakeref; + int ret = 0; + + BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0); + GEM_BUG_ON(!log->vma); + + /* + * GuC is recognizing log levels starting from 0 to max, we're using 0 + * as indication that logging should be disabled. + */ + if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX) + return -EINVAL; + + mutex_lock(&dev_priv->drm.struct_mutex); + + if (log->level == level) + goto out_unlock; + + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) + ret = guc_action_control_log(guc, + GUC_LOG_LEVEL_IS_VERBOSE(level), + GUC_LOG_LEVEL_IS_ENABLED(level), + GUC_LOG_LEVEL_TO_VERBOSITY(level)); + if (ret) { + DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret); + goto out_unlock; + } + + log->level = level; + +out_unlock: + mutex_unlock(&dev_priv->drm.struct_mutex); + + return ret; +} + +bool intel_guc_log_relay_created(const struct intel_guc_log *log) +{ + return log->relay.buf_addr; +} + +int intel_guc_log_relay_open(struct intel_guc_log *log) +{ + int ret; + + if (!log->vma) + return -ENODEV; + + mutex_lock(&log->relay.lock); + + if (intel_guc_log_relay_created(log)) { + ret = -EEXIST; + goto out_unlock; + } + + /* + * We require SSE 4.1 for fast reads from the GuC log buffer and + * it should be present on the chipsets supporting GuC based + * submisssions. 
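+	 * i915_has_memcpy_from_wc() is the runtime check for that capability.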
+ */ + if (!i915_has_memcpy_from_wc()) { + ret = -ENXIO; + goto out_unlock; + } + + ret = guc_log_relay_create(log); + if (ret) + goto out_unlock; + + ret = guc_log_map(log); + if (ret) + goto out_relay; + + mutex_unlock(&log->relay.lock); + + return 0; + +out_relay: + guc_log_relay_destroy(log); +out_unlock: + mutex_unlock(&log->relay.lock); + + return ret; +} + +int intel_guc_log_relay_start(struct intel_guc_log *log) +{ + if (log->relay.started) + return -EEXIST; + + guc_log_enable_flush_events(log); + + /* + * When GuC is logging without us relaying to userspace, we're ignoring + * the flush notification. This means that we need to unconditionally + * flush on relay enabling, since GuC only notifies us once. + */ + queue_work(system_highpri_wq, &log->relay.flush_work); + + log->relay.started = true; + + return 0; +} + +void intel_guc_log_relay_flush(struct intel_guc_log *log) +{ + struct intel_guc *guc = log_to_guc(log); + intel_wakeref_t wakeref; + + if (!log->relay.started) + return; + + /* + * Before initiating the forceful flush, wait for any pending/ongoing + * flush to complete otherwise forceful flush may not actually happen. + */ + flush_work(&log->relay.flush_work); + + with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref) + guc_action_flush_log(guc); + + /* GuC would have updated log buffer by now, so capture it */ + guc_log_capture_logs(log); +} + +/* + * Stops the relay log. Called from intel_guc_log_relay_close(), so no + * possibility of race with start/flush since relay_write cannot race + * relay_close. + */ +static void guc_log_relay_stop(struct intel_guc_log *log) +{ + struct intel_guc *guc = log_to_guc(log); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + + if (!log->relay.started) + return; + + guc_log_disable_flush_events(log); + intel_synchronize_irq(i915); + + flush_work(&log->relay.flush_work); + + log->relay.started = false; +} + +void intel_guc_log_relay_close(struct intel_guc_log *log) +{ + guc_log_relay_stop(log); + + mutex_lock(&log->relay.lock); + GEM_BUG_ON(!intel_guc_log_relay_created(log)); + guc_log_unmap(log); + guc_log_relay_destroy(log); + mutex_unlock(&log->relay.lock); +} + +void intel_guc_log_handle_flush_event(struct intel_guc_log *log) +{ + queue_work(system_highpri_wq, &log->relay.flush_work); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h new file mode 100644 index 000000000000..c252c022c5fc --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#ifndef _INTEL_GUC_LOG_H_ +#define _INTEL_GUC_LOG_H_ + +#include <linux/mutex.h> +#include <linux/relay.h> +#include <linux/workqueue.h> + +#include "intel_guc_fwif.h" +#include "i915_gem.h" + +struct intel_guc; + +#ifdef CONFIG_DRM_I915_DEBUG_GUC +#define CRASH_BUFFER_SIZE SZ_2M +#define DPC_BUFFER_SIZE SZ_8M +#define ISR_BUFFER_SIZE SZ_8M +#else +#define CRASH_BUFFER_SIZE SZ_8K +#define DPC_BUFFER_SIZE SZ_32K +#define ISR_BUFFER_SIZE SZ_32K +#endif + +/* + * While we're using plain log level in i915, GuC controls are much more... + * "elaborate"? We have a couple of bits for verbosity, separate bit for actual + * log enabling, and separate bit for default logging - which "conveniently" + * ignores the enable bit. 
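+ *
+ * For example, with the macros below: level 0 disables logging, level 1
+ * enables non-verbose logging, and levels 2..GUC_LOG_LEVEL_MAX map onto
+ * GuC verbosities 0..GUC_LOG_VERBOSITY_MAX, e.g.
+ * GUC_LOG_LEVEL_TO_VERBOSITY(3) == 1.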
+ */ +#define GUC_LOG_LEVEL_DISABLED 0 +#define GUC_LOG_LEVEL_NON_VERBOSE 1 +#define GUC_LOG_LEVEL_IS_ENABLED(x) ((x) > GUC_LOG_LEVEL_DISABLED) +#define GUC_LOG_LEVEL_IS_VERBOSE(x) ((x) > GUC_LOG_LEVEL_NON_VERBOSE) +#define GUC_LOG_LEVEL_TO_VERBOSITY(x) ({ \ + typeof(x) _x = (x); \ + GUC_LOG_LEVEL_IS_VERBOSE(_x) ? _x - 2 : 0; \ +}) +#define GUC_VERBOSITY_TO_LOG_LEVEL(x) ((x) + 2) +#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX) + +struct intel_guc_log { + u32 level; + struct i915_vma *vma; + struct { + void *buf_addr; + bool started; + struct work_struct flush_work; + struct rchan *channel; + struct mutex lock; + u32 full_count; + } relay; + /* logging related stats */ + struct { + u32 sampled_overflow; + u32 overflow; + u32 flush; + } stats[GUC_MAX_LOG_BUFFER]; +}; + +void intel_guc_log_init_early(struct intel_guc_log *log); +int intel_guc_log_create(struct intel_guc_log *log); +void intel_guc_log_destroy(struct intel_guc_log *log); + +int intel_guc_log_set_level(struct intel_guc_log *log, u32 level); +bool intel_guc_log_relay_created(const struct intel_guc_log *log); +int intel_guc_log_relay_open(struct intel_guc_log *log); +int intel_guc_log_relay_start(struct intel_guc_log *log); +void intel_guc_log_relay_flush(struct intel_guc_log *log); +void intel_guc_log_relay_close(struct intel_guc_log *log); + +void intel_guc_log_handle_flush_event(struct intel_guc_log *log); + +static inline u32 intel_guc_log_get_level(struct intel_guc_log *log) +{ + return log->level; +} + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h new file mode 100644 index 000000000000..1949346e714e --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#ifndef _INTEL_GUC_REG_H_ +#define _INTEL_GUC_REG_H_ + +#include <linux/compiler.h> +#include <linux/types.h> + +#include "i915_reg.h" + +/* Definitions of GuC H/W registers, bits, etc */ + +#define GUC_STATUS _MMIO(0xc000) +#define GS_RESET_SHIFT 0 +#define GS_MIA_IN_RESET (0x01 << GS_RESET_SHIFT) +#define GS_BOOTROM_SHIFT 1 +#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT) +#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT) +#define GS_BOOTROM_JUMP_PASSED (0x76 << GS_BOOTROM_SHIFT) +#define GS_UKERNEL_SHIFT 8 +#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT) +#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT) +#define GS_UKERNEL_DPC_ERROR (0x60 << GS_UKERNEL_SHIFT) +#define GS_UKERNEL_EXCEPTION (0x70 << GS_UKERNEL_SHIFT) +#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT) +#define GS_MIA_SHIFT 16 +#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) +#define GS_MIA_CORE_STATE (0x01 << GS_MIA_SHIFT) +#define GS_MIA_HALT_REQUESTED (0x02 << GS_MIA_SHIFT) +#define GS_MIA_ISR_ENTRY (0x04 << GS_MIA_SHIFT) +#define GS_AUTH_STATUS_SHIFT 30 +#define GS_AUTH_STATUS_MASK (0x03 << GS_AUTH_STATUS_SHIFT) +#define GS_AUTH_STATUS_BAD (0x01 << GS_AUTH_STATUS_SHIFT) +#define GS_AUTH_STATUS_GOOD (0x02 << GS_AUTH_STATUS_SHIFT) + +#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4) +#define SOFT_SCRATCH_COUNT 16 + +#define GEN11_SOFT_SCRATCH(n) _MMIO(0x190240 + (n) * 4) +#define GEN11_SOFT_SCRATCH_COUNT 4 + +#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4) +#define UOS_RSA_SCRATCH_COUNT 64 + +#define DMA_ADDR_0_LOW _MMIO(0xc300) +#define DMA_ADDR_0_HIGH _MMIO(0xc304) +#define DMA_ADDR_1_LOW _MMIO(0xc308) +#define DMA_ADDR_1_HIGH _MMIO(0xc30c) +#define DMA_ADDRESS_SPACE_WOPCM (7 << 
16) +#define DMA_ADDRESS_SPACE_GTT (8 << 16) +#define DMA_COPY_SIZE _MMIO(0xc310) +#define DMA_CTRL _MMIO(0xc314) +#define HUC_UKERNEL (1<<9) +#define UOS_MOVE (1<<4) +#define START_DMA (1<<0) +#define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340) +#define GUC_WOPCM_OFFSET_VALID (1<<0) +#define HUC_LOADING_AGENT_VCR (0<<1) +#define HUC_LOADING_AGENT_GUC (1<<1) +#define GUC_WOPCM_OFFSET_SHIFT 14 +#define GUC_WOPCM_OFFSET_MASK (0x3ffff << GUC_WOPCM_OFFSET_SHIFT) +#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4) + +#define HUC_STATUS2 _MMIO(0xD3B0) +#define HUC_FW_VERIFIED (1<<7) + +#define GEN11_HUC_KERNEL_LOAD_INFO _MMIO(0xC1DC) +#define HUC_LOAD_SUCCESSFUL (1 << 0) + +#define GUC_WOPCM_SIZE _MMIO(0xc050) +#define GUC_WOPCM_SIZE_LOCKED (1<<0) +#define GUC_WOPCM_SIZE_SHIFT 12 +#define GUC_WOPCM_SIZE_MASK (0xfffff << GUC_WOPCM_SIZE_SHIFT) + +#define GEN8_GT_PM_CONFIG _MMIO(0x138140) +#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140) +#define GEN9_GT_PM_CONFIG _MMIO(0x13816c) +#define GT_DOORBELL_ENABLE (1<<0) + +#define GEN8_GTCR _MMIO(0x4274) +#define GEN8_GTCR_INVALIDATE (1<<0) + +#define GEN12_GUC_TLB_INV_CR _MMIO(0xcee8) +#define GEN12_GUC_TLB_INV_CR_INVALIDATE (1 << 0) + +#define GUC_ARAT_C6DIS _MMIO(0xA178) + +#define GUC_SHIM_CONTROL _MMIO(0xc064) +#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0) +#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1) +#define GUC_ENABLE_MIA_CACHING (1<<2) +#define GUC_GEN10_MSGCH_ENABLE (1<<4) +#define GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA (1<<9) +#define GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA (1<<10) +#define GUC_ENABLE_MIA_CLOCK_GATING (1<<15) +#define GUC_GEN10_SHIM_WC_ENABLE (1<<21) + +#define GUC_SEND_INTERRUPT _MMIO(0xc4c8) +#define GUC_SEND_TRIGGER (1<<0) +#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0) + +#define GUC_NUM_DOORBELLS 256 + +/* format of the HW-monitored doorbell cacheline */ +struct guc_doorbell_info { + u32 db_status; +#define GUC_DOORBELL_DISABLED 0 +#define GUC_DOORBELL_ENABLED 1 + + u32 cookie; + u32 reserved[14]; +} __packed; + +#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8) +#define GEN8_DRB_VALID (1<<0) +#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4) + +#define DE_GUCRMR _MMIO(0x44054) + +#define GUC_BCS_RCS_IER _MMIO(0xC550) +#define GUC_VCS2_VCS1_IER _MMIO(0xC554) +#define GUC_WD_VECS_IER _MMIO(0xC558) +#define GUC_PM_P24C_IER _MMIO(0xC55C) + +/* GuC Interrupt Vector */ +#define GUC_INTR_GUC2HOST BIT(15) +#define GUC_INTR_EXEC_ERROR BIT(14) +#define GUC_INTR_DISPLAY_EVENT BIT(13) +#define GUC_INTR_SEM_SIG BIT(12) +#define GUC_INTR_IOMMU2GUC BIT(11) +#define GUC_INTR_DOORBELL_RANG BIT(10) +#define GUC_INTR_DMA_DONE BIT(9) +#define GUC_INTR_FATAL_ERROR BIT(8) +#define GUC_INTR_NOTIF_ERROR BIT(7) +#define GUC_INTR_SW_INT_6 BIT(6) +#define GUC_INTR_SW_INT_5 BIT(5) +#define GUC_INTR_SW_INT_4 BIT(4) +#define GUC_INTR_SW_INT_3 BIT(3) +#define GUC_INTR_SW_INT_2 BIT(2) +#define GUC_INTR_SW_INT_1 BIT(1) +#define GUC_INTR_SW_INT_0 BIT(0) + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c new file mode 100644 index 000000000000..2498c55e0ea5 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -0,0 +1,1191 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2014 Intel Corporation + */ + +#include <linux/circ_buf.h> + +#include "gem/i915_gem_context.h" +#include "gt/intel_context.h" +#include "gt/intel_engine_pm.h" +#include "gt/intel_gt.h" +#include "gt/intel_gt_pm.h" +#include "gt/intel_lrc_reg.h" +#include "gt/intel_ring.h" + +#include "intel_guc_submission.h" + 
+#include "i915_drv.h" +#include "i915_trace.h" + +enum { + GUC_PREEMPT_NONE = 0, + GUC_PREEMPT_INPROGRESS, + GUC_PREEMPT_FINISHED, +}; +#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8 +#define GUC_PREEMPT_BREADCRUMB_BYTES \ + (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS) + +/** + * DOC: GuC-based command submission + * + * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC + * firmware is moving to an updated submission interface and we plan to + * turn submission back on when that lands. The below documentation (and related + * code) matches the old submission model and will be updated as part of the + * upgrade to the new flow. + * + * GuC client: + * A intel_guc_client refers to a submission path through GuC. Currently, there + * is only one client, which is charged with all submissions to the GuC. This + * struct is the owner of a doorbell, a process descriptor and a workqueue (all + * of them inside a single gem object that contains all required pages for these + * elements). + * + * GuC stage descriptor: + * During initialization, the driver allocates a static pool of 1024 such + * descriptors, and shares them with the GuC. + * Currently, there exists a 1:1 mapping between a intel_guc_client and a + * guc_stage_desc (via the client's stage_id), so effectively only one + * gets used. This stage descriptor lets the GuC know about the doorbell, + * workqueue and process descriptor. Theoretically, it also lets the GuC + * know about our HW contexts (context ID, etc...), but we actually + * employ a kind of submission where the GuC uses the LRCA sent via the work + * item instead (the single guc_stage_desc associated to execbuf client + * contains information about the default kernel context only, but this is + * essentially unused). This is called a "proxy" submission. + * + * The Scratch registers: + * There are 16 MMIO-based registers start from 0xC180. The kernel driver writes + * a value to the action register (SOFT_SCRATCH_0) along with any data. It then + * triggers an interrupt on the GuC via another register write (0xC4C8). + * Firmware writes a success/fail code back to the action register after + * processes the request. The kernel driver polls waiting for this update and + * then proceeds. + * See intel_guc_send() + * + * Doorbells: + * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW) + * mapped into process space. + * + * Work Items: + * There are several types of work items that the host may place into a + * workqueue, each with its own requirements and limitations. Currently only + * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which + * represents in-order queue. The kernel driver packs ring tail pointer and an + * ELSP context descriptor dword into Work Item. + * See guc_add_request() + * + */ + +static inline struct i915_priolist *to_priolist(struct rb_node *rb) +{ + return rb_entry(rb, struct i915_priolist, node); +} + +static inline bool is_high_priority(struct intel_guc_client *client) +{ + return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH || + client->priority == GUC_CLIENT_PRIORITY_HIGH); +} + +static int reserve_doorbell(struct intel_guc_client *client) +{ + unsigned long offset; + unsigned long end; + u16 id; + + GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID); + + /* + * The bitmap tracks which doorbell registers are currently in use. + * It is split into two halves; the first half is used for normal + * priority contexts, the second half for high-priority ones. 
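+	 * With GUC_NUM_DOORBELLS == 256 this means ids 0..127 are handed out
+	 * to normal clients and ids 128..255 to high-priority ones.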
+ */ + offset = 0; + end = GUC_NUM_DOORBELLS / 2; + if (is_high_priority(client)) { + offset = end; + end += offset; + } + + id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset); + if (id == end) + return -ENOSPC; + + __set_bit(id, client->guc->doorbell_bitmap); + client->doorbell_id = id; + DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n", + client->stage_id, yesno(is_high_priority(client)), + id); + return 0; +} + +static bool has_doorbell(struct intel_guc_client *client) +{ + if (client->doorbell_id == GUC_DOORBELL_INVALID) + return false; + + return test_bit(client->doorbell_id, client->guc->doorbell_bitmap); +} + +static void unreserve_doorbell(struct intel_guc_client *client) +{ + GEM_BUG_ON(!has_doorbell(client)); + + __clear_bit(client->doorbell_id, client->guc->doorbell_bitmap); + client->doorbell_id = GUC_DOORBELL_INVALID; +} + +/* + * Tell the GuC to allocate or deallocate a specific doorbell + */ + +static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id) +{ + u32 action[] = { + INTEL_GUC_ACTION_ALLOCATE_DOORBELL, + stage_id + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id) +{ + u32 action[] = { + INTEL_GUC_ACTION_DEALLOCATE_DOORBELL, + stage_id + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client) +{ + struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr; + + return &base[client->stage_id]; +} + +/* + * Initialise, update, or clear doorbell data shared with the GuC + * + * These functions modify shared data and so need access to the mapped + * client object which contains the page being used for the doorbell + */ + +static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id) +{ + struct guc_stage_desc *desc; + + /* Update the GuC's idea of the doorbell ID */ + desc = __get_stage_desc(client); + desc->db_id = new_id; +} + +static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client) +{ + return client->vaddr + client->doorbell_offset; +} + +static bool __doorbell_valid(struct intel_guc *guc, u16 db_id) +{ + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; + + GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); + return intel_uncore_read(uncore, GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID; +} + +static void __init_doorbell(struct intel_guc_client *client) +{ + struct guc_doorbell_info *doorbell; + + doorbell = __get_doorbell(client); + doorbell->db_status = GUC_DOORBELL_ENABLED; + doorbell->cookie = 0; +} + +static void __fini_doorbell(struct intel_guc_client *client) +{ + struct guc_doorbell_info *doorbell; + u16 db_id = client->doorbell_id; + + doorbell = __get_doorbell(client); + doorbell->db_status = GUC_DOORBELL_DISABLED; + + /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit + * to go to zero after updating db_status before we call the GuC to + * release the doorbell + */ + if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10)) + WARN_ONCE(true, "Doorbell never became invalid after disable\n"); +} + +static int create_doorbell(struct intel_guc_client *client) +{ + int ret; + + if (WARN_ON(!has_doorbell(client))) + return -ENODEV; /* internal setup error, should never happen */ + + __update_doorbell_desc(client, client->doorbell_id); + __init_doorbell(client); + + ret = __guc_allocate_doorbell(client->guc, client->stage_id); + if (ret) { + __fini_doorbell(client); + 
__update_doorbell_desc(client, GUC_DOORBELL_INVALID); + DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n", + client->stage_id, ret); + return ret; + } + + return 0; +} + +static int destroy_doorbell(struct intel_guc_client *client) +{ + int ret; + + GEM_BUG_ON(!has_doorbell(client)); + + __fini_doorbell(client); + ret = __guc_deallocate_doorbell(client->guc, client->stage_id); + if (ret) + DRM_ERROR("Couldn't destroy client %u doorbell: %d\n", + client->stage_id, ret); + + __update_doorbell_desc(client, GUC_DOORBELL_INVALID); + + return ret; +} + +static unsigned long __select_cacheline(struct intel_guc *guc) +{ + unsigned long offset; + + /* Doorbell uses a single cache line within a page */ + offset = offset_in_page(guc->db_cacheline); + + /* Moving to next cache line to reduce contention */ + guc->db_cacheline += cache_line_size(); + + DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n", + offset, guc->db_cacheline, cache_line_size()); + return offset; +} + +static inline struct guc_process_desc * +__get_process_desc(struct intel_guc_client *client) +{ + return client->vaddr + client->proc_desc_offset; +} + +/* + * Initialise the process descriptor shared with the GuC firmware. + */ +static void guc_proc_desc_init(struct intel_guc_client *client) +{ + struct guc_process_desc *desc; + + desc = memset(__get_process_desc(client), 0, sizeof(*desc)); + + /* + * XXX: pDoorbell and WQVBaseAddress are pointers in process address + * space for ring3 clients (set them as in mmap_ioctl) or kernel + * space for kernel clients (map on demand instead? May make debug + * easier to have it mapped). + */ + desc->wq_base_addr = 0; + desc->db_base_addr = 0; + + desc->stage_id = client->stage_id; + desc->wq_size_bytes = GUC_WQ_SIZE; + desc->wq_status = WQ_STATUS_ACTIVE; + desc->priority = client->priority; +} + +static void guc_proc_desc_fini(struct intel_guc_client *client) +{ + struct guc_process_desc *desc; + + desc = __get_process_desc(client); + memset(desc, 0, sizeof(*desc)); +} + +static int guc_stage_desc_pool_create(struct intel_guc *guc) +{ + struct i915_vma *vma; + void *vaddr; + + vma = intel_guc_allocate_vma(guc, + PAGE_ALIGN(sizeof(struct guc_stage_desc) * + GUC_MAX_STAGE_DESCRIPTORS)); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + i915_vma_unpin_and_release(&vma, 0); + return PTR_ERR(vaddr); + } + + guc->stage_desc_pool = vma; + guc->stage_desc_pool_vaddr = vaddr; + ida_init(&guc->stage_ids); + + return 0; +} + +static void guc_stage_desc_pool_destroy(struct intel_guc *guc) +{ + ida_destroy(&guc->stage_ids); + i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP); +} + +/* + * Initialise/clear the stage descriptor shared with the GuC firmware. + * + * This descriptor tells the GuC where (in GGTT space) to find the important + * data structures relating to this client (doorbell, process descriptor, + * write queue, etc). 
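+ *
+ * Within the client object the doorbell cacheline and the process
+ * descriptor share the first page, while the work queue starts one page in
+ * (GUC_DB_SIZE) and spans GUC_WQ_SIZE bytes; the GGTT offsets programmed
+ * below follow that layout.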
+ */ +static void guc_stage_desc_init(struct intel_guc_client *client) +{ + struct intel_guc *guc = client->guc; + struct guc_stage_desc *desc; + u32 gfx_addr; + + desc = __get_stage_desc(client); + memset(desc, 0, sizeof(*desc)); + + desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | + GUC_STAGE_DESC_ATTR_KERNEL; + if (is_high_priority(client)) + desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT; + desc->stage_id = client->stage_id; + desc->priority = client->priority; + desc->db_id = client->doorbell_id; + + /* + * The doorbell, process descriptor, and workqueue are all parts + * of the client object, which the GuC will reference via the GGTT + */ + gfx_addr = intel_guc_ggtt_offset(guc, client->vma); + desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) + + client->doorbell_offset; + desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client)); + desc->db_trigger_uk = gfx_addr + client->doorbell_offset; + desc->process_desc = gfx_addr + client->proc_desc_offset; + desc->wq_addr = gfx_addr + GUC_DB_SIZE; + desc->wq_size = GUC_WQ_SIZE; + + desc->desc_private = ptr_to_u64(client); +} + +static void guc_stage_desc_fini(struct intel_guc_client *client) +{ + struct guc_stage_desc *desc; + + desc = __get_stage_desc(client); + memset(desc, 0, sizeof(*desc)); +} + +/* Construct a Work Item and append it to the GuC's Work Queue */ +static void guc_wq_item_append(struct intel_guc_client *client, + u32 target_engine, u32 context_desc, + u32 ring_tail, u32 fence_id) +{ + /* wqi_len is in DWords, and does not include the one-word header */ + const size_t wqi_size = sizeof(struct guc_wq_item); + const u32 wqi_len = wqi_size / sizeof(u32) - 1; + struct guc_process_desc *desc = __get_process_desc(client); + struct guc_wq_item *wqi; + u32 wq_off; + + lockdep_assert_held(&client->wq_lock); + + /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we + * should not have the case where structure wqi is across page, neither + * wrapped to the beginning. This simplifies the implementation below. + * + * XXX: if not the case, we need save data to a temp wqi and copy it to + * workqueue buffer dw by dw. + */ + BUILD_BUG_ON(wqi_size != 16); + + /* We expect the WQ to be active if we're appending items to it */ + GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE); + + /* Free space is guaranteed. */ + wq_off = READ_ONCE(desc->tail); + GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head), + GUC_WQ_SIZE) < wqi_size); + GEM_BUG_ON(wq_off & (wqi_size - 1)); + + /* WQ starts from the page after doorbell / process_desc */ + wqi = client->vaddr + wq_off + GUC_DB_SIZE; + + if (I915_SELFTEST_ONLY(client->use_nop_wqi)) { + wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT); + } else { + /* Now fill in the 4-word work queue item */ + wqi->header = WQ_TYPE_INORDER | + (wqi_len << WQ_LEN_SHIFT) | + (target_engine << WQ_TARGET_SHIFT) | + WQ_NO_WCFLUSH_WAIT; + wqi->context_desc = context_desc; + wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT; + GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX); + wqi->fence_id = fence_id; + } + + /* Make the update visible to GuC */ + WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); +} + +static void guc_ring_doorbell(struct intel_guc_client *client) +{ + struct guc_doorbell_info *db; + u32 cookie; + + lockdep_assert_held(&client->wq_lock); + + /* pointer of current doorbell cacheline */ + db = __get_doorbell(client); + + /* + * We're not expecting the doorbell cookie to change behind our back, + * we also need to treat 0 as a reserved value. 
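+ *
+ * The "cookie + 1 ?: cookie + 2" expression below is the GNU C
+ * short-hand conditional: when cookie + 1 evaluates to zero it yields
+ * cookie + 2 instead, so the reserved value 0 is skipped on wrap-around.
+ * A minimal stand-alone sketch of the same rule (hypothetical helper,
+ * not part of the driver):
+ *
+ *	static inline u32 next_cookie(u32 c)
+ *	{
+ *		return c + 1 ?: c + 2;
+ *	}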
+ */ + cookie = READ_ONCE(db->cookie); + WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie); + + /* XXX: doorbell was lost and need to acquire it again */ + GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED); +} + +static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) +{ + struct intel_guc_client *client = guc->execbuf_client; + struct intel_engine_cs *engine = rq->engine; + u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc); + u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); + + guc_wq_item_append(client, engine->guc_id, ctx_desc, + ring_tail, rq->fence.seqno); + guc_ring_doorbell(client); +} + +/* + * When we're doing submissions using regular execlists backend, writing to + * ELSP from CPU side is enough to make sure that writes to ringbuffer pages + * pinned in mappable aperture portion of GGTT are visible to command streamer. + * Writes done by GuC on our behalf are not guaranteeing such ordering, + * therefore, to ensure the flush, we're issuing a POSTING READ. + */ +static void flush_ggtt_writes(struct i915_vma *vma) +{ + struct drm_i915_private *i915 = vma->vm->i915; + + if (i915_vma_is_map_and_fenceable(vma)) + intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS); +} + +static void guc_submit(struct intel_engine_cs *engine, + struct i915_request **out, + struct i915_request **end) +{ + struct intel_guc *guc = &engine->gt->uc.guc; + struct intel_guc_client *client = guc->execbuf_client; + + spin_lock(&client->wq_lock); + + do { + struct i915_request *rq = *out++; + + flush_ggtt_writes(rq->ring->vma); + guc_add_request(guc, rq); + } while (out != end); + + spin_unlock(&client->wq_lock); +} + +static inline int rq_prio(const struct i915_request *rq) +{ + return rq->sched.attr.priority | __NO_PREEMPTION; +} + +static struct i915_request *schedule_in(struct i915_request *rq, int idx) +{ + trace_i915_request_in(rq, idx); + + /* + * Currently we are not tracking the rq->context being inflight + * (ce->inflight = rq->engine). It is only used by the execlists + * backend at the moment, a similar counting strategy would be + * required if we generalise the inflight tracking. + */ + + intel_gt_pm_get(rq->engine->gt); + return i915_request_get(rq); +} + +static void schedule_out(struct i915_request *rq) +{ + trace_i915_request_out(rq); + + intel_gt_pm_put(rq->engine->gt); + i915_request_put(rq); +} + +static void __guc_dequeue(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request **first = execlists->inflight; + struct i915_request ** const last_port = first + execlists->port_mask; + struct i915_request *last = first[0]; + struct i915_request **port; + bool submit = false; + struct rb_node *rb; + + lockdep_assert_held(&engine->active.lock); + + if (last) { + if (*++first) + return; + + last = NULL; + } + + /* + * We write directly into the execlists->inflight queue and don't use + * the execlists->pending queue, as we don't have a distinct switch + * event. 
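+ *
+ * The loop below also coalesces consecutive requests that share a
+ * context: only the last request of each run occupies a port slot, and
+ * its ring tail covers the whole run when guc_submit() later turns the
+ * slot into a work queue item.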
+ */ + port = first; + while ((rb = rb_first_cached(&execlists->queue))) { + struct i915_priolist *p = to_priolist(rb); + struct i915_request *rq, *rn; + int i; + + priolist_for_each_request_consume(rq, rn, p, i) { + if (last && rq->hw_context != last->hw_context) { + if (port == last_port) + goto done; + + *port = schedule_in(last, + port - execlists->inflight); + port++; + } + + list_del_init(&rq->sched.link); + __i915_request_submit(rq); + submit = true; + last = rq; + } + + rb_erase_cached(&p->node, &execlists->queue); + i915_priolist_free(p); + } +done: + execlists->queue_priority_hint = + rb ? to_priolist(rb)->priority : INT_MIN; + if (submit) { + *port = schedule_in(last, port - execlists->inflight); + *++port = NULL; + guc_submit(engine, first, port); + } + execlists->active = execlists->inflight; +} + +static void guc_submission_tasklet(unsigned long data) +{ + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request **port, *rq; + unsigned long flags; + + spin_lock_irqsave(&engine->active.lock, flags); + + for (port = execlists->inflight; (rq = *port); port++) { + if (!i915_request_completed(rq)) + break; + + schedule_out(rq); + } + if (port != execlists->inflight) { + int idx = port - execlists->inflight; + int rem = ARRAY_SIZE(execlists->inflight) - idx; + memmove(execlists->inflight, port, rem * sizeof(*port)); + } + + __guc_dequeue(engine); + + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +static void guc_reset_prepare(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + GEM_TRACE("%s\n", engine->name); + + /* + * Prevent request submission to the hardware until we have + * completed the reset in i915_gem_reset_finish(). If a request + * is completed by one engine, it may then queue a request + * to a second via its execlists->tasklet *just* as we are + * calling engine->init_hw() and also writing the ELSP. + * Turning off the execlists->tasklet until the reset is over + * prevents the race. + */ + __tasklet_disable_sync_once(&execlists->tasklet); +} + +static void +cancel_port_requests(struct intel_engine_execlists * const execlists) +{ + struct i915_request * const *port, *rq; + + /* Note we are only using the inflight and not the pending queue */ + + for (port = execlists->active; (rq = *port); port++) + schedule_out(rq); + execlists->active = + memset(execlists->inflight, 0, sizeof(execlists->inflight)); +} + +static void guc_reset(struct intel_engine_cs *engine, bool stalled) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request *rq; + unsigned long flags; + + spin_lock_irqsave(&engine->active.lock, flags); + + cancel_port_requests(execlists); + + /* Push back any incomplete requests for replay after the reset. 
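+ * Only the request returned by the unwind (the one presumed to have
+ * been executing) has its context image rewound below; if it never
+ * actually started, 'stalled' is cleared so it is not treated as the
+ * guilty request.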
*/ + rq = execlists_unwind_incomplete_requests(execlists); + if (!rq) + goto out_unlock; + + if (!i915_request_started(rq)) + stalled = false; + + __i915_request_reset(rq, stalled); + intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled); + +out_unlock: + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +static void guc_cancel_requests(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request *rq, *rn; + struct rb_node *rb; + unsigned long flags; + + GEM_TRACE("%s\n", engine->name); + + /* + * Before we call engine->cancel_requests(), we should have exclusive + * access to the submission state. This is arranged for us by the + * caller disabling the interrupt generation, the tasklet and other + * threads that may then access the same state, giving us a free hand + * to reset state. However, we still need to let lockdep be aware that + * we know this state may be accessed in hardirq context, so we + * disable the irq around this manipulation and we want to keep + * the spinlock focused on its duties and not accidentally conflate + * coverage to the submission's irq state. (Similarly, although we + * shouldn't need to disable irq around the manipulation of the + * submission's irq state, we also wish to remind ourselves that + * it is irq state.) + */ + spin_lock_irqsave(&engine->active.lock, flags); + + /* Cancel the requests on the HW and clear the ELSP tracker. */ + cancel_port_requests(execlists); + + /* Mark all executing requests as skipped. */ + list_for_each_entry(rq, &engine->active.requests, sched.link) { + if (!i915_request_signaled(rq)) + dma_fence_set_error(&rq->fence, -EIO); + + i915_request_mark_complete(rq); + } + + /* Flush the queued requests to the timeline list (for retiring). */ + while ((rb = rb_first_cached(&execlists->queue))) { + struct i915_priolist *p = to_priolist(rb); + int i; + + priolist_for_each_request_consume(rq, rn, p, i) { + list_del_init(&rq->sched.link); + __i915_request_submit(rq); + dma_fence_set_error(&rq->fence, -EIO); + i915_request_mark_complete(rq); + } + + rb_erase_cached(&p->node, &execlists->queue); + i915_priolist_free(p); + } + + /* Remaining _unready_ requests will be nop'ed when submitted */ + + execlists->queue_priority_hint = INT_MIN; + execlists->queue = RB_ROOT_CACHED; + + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +static void guc_reset_finish(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + if (__tasklet_enable(&execlists->tasklet)) + /* And kick in case we missed a new request submission. */ + tasklet_hi_schedule(&execlists->tasklet); + + GEM_TRACE("%s: depth->%d\n", engine->name, + atomic_read(&execlists->tasklet.count)); +} + +/* + * Everything below here is concerned with setup & teardown, and is + * therefore not part of the somewhat time-critical batch-submission + * path of guc_submit() above. 
+ */ + +/* Check that a doorbell register is in the expected state */ +static bool doorbell_ok(struct intel_guc *guc, u16 db_id) +{ + bool valid; + + GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); + + valid = __doorbell_valid(guc, db_id); + + if (test_bit(db_id, guc->doorbell_bitmap) == valid) + return true; + + DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n", + db_id, yesno(valid)); + + return false; +} + +static bool guc_verify_doorbells(struct intel_guc *guc) +{ + bool doorbells_ok = true; + u16 db_id; + + for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) + if (!doorbell_ok(guc, db_id)) + doorbells_ok = false; + + return doorbells_ok; +} + +/** + * guc_client_alloc() - Allocate an intel_guc_client + * @guc: the intel_guc structure + * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW + * The kernel client to replace ExecList submission is created with + * NORMAL priority. Priority of a client for scheduler can be HIGH, + * while a preemption context can use CRITICAL. + * + * Return: An intel_guc_client object if success, else NULL. + */ +static struct intel_guc_client * +guc_client_alloc(struct intel_guc *guc, u32 priority) +{ + struct intel_guc_client *client; + struct i915_vma *vma; + void *vaddr; + int ret; + + client = kzalloc(sizeof(*client), GFP_KERNEL); + if (!client) + return ERR_PTR(-ENOMEM); + + client->guc = guc; + client->priority = priority; + client->doorbell_id = GUC_DOORBELL_INVALID; + spin_lock_init(&client->wq_lock); + + ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS, + GFP_KERNEL); + if (ret < 0) + goto err_client; + + client->stage_id = ret; + + /* The first page is doorbell/proc_desc. Two followed pages are wq. */ + vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_id; + } + + /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */ + client->vma = vma; + + vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto err_vma; + } + client->vaddr = vaddr; + + ret = reserve_doorbell(client); + if (ret) + goto err_vaddr; + + client->doorbell_offset = __select_cacheline(guc); + + /* + * Since the doorbell only requires a single cacheline, we can save + * space by putting the application process descriptor in the same + * page. Use the half of the page that doesn't include the doorbell. 
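+ *
+ * For example (assuming a 64-byte cache line and that no cachelines
+ * have been handed out yet), the first client would get
+ * doorbell_offset 0 and proc_desc_offset GUC_DB_SIZE / 2, the next
+ * doorbell_offset 64 with the same proc_desc_offset, and so on until a
+ * doorbell lands in the upper half of the page, at which point the
+ * process descriptor moves to offset 0.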
+ */ + if (client->doorbell_offset >= (GUC_DB_SIZE / 2)) + client->proc_desc_offset = 0; + else + client->proc_desc_offset = (GUC_DB_SIZE / 2); + + DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n", + priority, client, client->stage_id); + DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n", + client->doorbell_id, client->doorbell_offset); + + return client; + +err_vaddr: + i915_gem_object_unpin_map(client->vma->obj); +err_vma: + i915_vma_unpin_and_release(&client->vma, 0); +err_id: + ida_simple_remove(&guc->stage_ids, client->stage_id); +err_client: + kfree(client); + return ERR_PTR(ret); +} + +static void guc_client_free(struct intel_guc_client *client) +{ + unreserve_doorbell(client); + i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP); + ida_simple_remove(&client->guc->stage_ids, client->stage_id); + kfree(client); +} + +static inline bool ctx_save_restore_disabled(struct intel_context *ce) +{ + u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1]; + +#define SR_DISABLED \ + _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \ + CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT) + + return (sr & SR_DISABLED) == SR_DISABLED; + +#undef SR_DISABLED +} + +static int guc_clients_create(struct intel_guc *guc) +{ + struct intel_guc_client *client; + + GEM_BUG_ON(guc->execbuf_client); + + client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL); + if (IS_ERR(client)) { + DRM_ERROR("Failed to create GuC client for submission!\n"); + return PTR_ERR(client); + } + guc->execbuf_client = client; + + return 0; +} + +static void guc_clients_destroy(struct intel_guc *guc) +{ + struct intel_guc_client *client; + + client = fetch_and_zero(&guc->execbuf_client); + if (client) + guc_client_free(client); +} + +static int __guc_client_enable(struct intel_guc_client *client) +{ + int ret; + + guc_proc_desc_init(client); + guc_stage_desc_init(client); + + ret = create_doorbell(client); + if (ret) + goto fail; + + return 0; + +fail: + guc_stage_desc_fini(client); + guc_proc_desc_fini(client); + return ret; +} + +static void __guc_client_disable(struct intel_guc_client *client) +{ + /* + * By the time we're here, GuC may have already been reset. if that is + * the case, instead of trying (in vain) to communicate with it, let's + * just cleanup the doorbell HW and our internal state. + */ + if (intel_guc_is_running(client->guc)) + destroy_doorbell(client); + else + __fini_doorbell(client); + + guc_stage_desc_fini(client); + guc_proc_desc_fini(client); +} + +static int guc_clients_enable(struct intel_guc *guc) +{ + return __guc_client_enable(guc->execbuf_client); +} + +static void guc_clients_disable(struct intel_guc *guc) +{ + if (guc->execbuf_client) + __guc_client_disable(guc->execbuf_client); +} + +/* + * Set up the memory resources to be shared with the GuC (via the GGTT) + * at firmware loading time. + */ +int intel_guc_submission_init(struct intel_guc *guc) +{ + int ret; + + if (guc->stage_desc_pool) + return 0; + + ret = guc_stage_desc_pool_create(guc); + if (ret) + return ret; + /* + * Keep static analysers happy, let them know that we allocated the + * vma after testing that it didn't exist earlier. 
+ */ + GEM_BUG_ON(!guc->stage_desc_pool); + + WARN_ON(!guc_verify_doorbells(guc)); + ret = guc_clients_create(guc); + if (ret) + goto err_pool; + + return 0; + +err_pool: + guc_stage_desc_pool_destroy(guc); + return ret; +} + +void intel_guc_submission_fini(struct intel_guc *guc) +{ + guc_clients_destroy(guc); + WARN_ON(!guc_verify_doorbells(guc)); + + if (guc->stage_desc_pool) + guc_stage_desc_pool_destroy(guc); +} + +static void guc_interrupts_capture(struct intel_gt *gt) +{ + struct intel_rps *rps = >->rps; + struct intel_uncore *uncore = gt->uncore; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int irqs; + + /* tell all command streamers to forward interrupts (but not vblank) + * to GuC + */ + irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); + for_each_engine(engine, gt, id) + ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); + + /* route USER_INTERRUPT to Host, all others are sent to GuC. */ + irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | + GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; + /* These three registers have the same bit definitions */ + intel_uncore_write(uncore, GUC_BCS_RCS_IER, ~irqs); + intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, ~irqs); + intel_uncore_write(uncore, GUC_WD_VECS_IER, ~irqs); + + /* + * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all + * (unmasked) PM interrupts to the GuC. All other bits of this + * register *disable* generation of a specific interrupt. + * + * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when + * writing to the PM interrupt mask register, i.e. interrupts + * that must not be disabled. + * + * If the GuC is handling these interrupts, then we must not let + * the PM code disable ANY interrupt that the GuC is expecting. + * So for each ENABLED (0) bit in this register, we must SET the + * bit in pm_intrmsk_mbz so that it's left enabled for the GuC. + * GuC needs ARAT expired interrupt unmasked hence it is set in + * pm_intrmsk_mbz. + * + * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will + * result in the register bit being left SET! + */ + rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK; + rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; +} + +static void guc_interrupts_release(struct intel_gt *gt) +{ + struct intel_rps *rps = >->rps; + struct intel_uncore *uncore = gt->uncore; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int irqs; + + /* + * tell all command streamers NOT to forward interrupts or vblank + * to GuC. + */ + irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); + irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); + for_each_engine(engine, gt, id) + ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); + + /* route all GT interrupts to the host */ + intel_uncore_write(uncore, GUC_BCS_RCS_IER, 0); + intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, 0); + intel_uncore_write(uncore, GUC_WD_VECS_IER, 0); + + rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; + rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK; +} + +static void guc_set_default_submission(struct intel_engine_cs *engine) +{ + /* + * We inherit a bunch of functions from execlists that we'd like + * to keep using: + * + * engine->submit_request = execlists_submit_request; + * engine->cancel_requests = execlists_cancel_requests; + * engine->schedule = execlists_schedule; + * + * But we need to override the actual submission backend in order + * to talk to the GuC. 
+ */ + intel_execlists_set_default_submission(engine); + + engine->execlists.tasklet.func = guc_submission_tasklet; + + /* do not use execlists park/unpark */ + engine->park = engine->unpark = NULL; + + engine->reset.prepare = guc_reset_prepare; + engine->reset.reset = guc_reset; + engine->reset.finish = guc_reset_finish; + + engine->cancel_requests = guc_cancel_requests; + + engine->flags &= ~I915_ENGINE_SUPPORTS_STATS; + engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; + + /* + * For the breadcrumb irq to work we need the interrupts to stay + * enabled. However, on all platforms on which we'll have support for + * GuC submission we don't allow disabling the interrupts at runtime, so + * we're always safe with the current flow. + */ + GEM_BUG_ON(engine->irq_enable || engine->irq_disable); +} + +int intel_guc_submission_enable(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err; + + err = i915_inject_probe_error(gt->i915, -ENXIO); + if (err) + return err; + + /* + * We're using GuC work items for submitting work through GuC. Since + * we're coalescing multiple requests from a single context into a + * single work item prior to assigning it to execlist_port, we can + * never have more work items than the total number of ports (for all + * engines). The GuC firmware is controlling the HEAD of work queue, + * and it is guaranteed that it will remove the work item from the + * queue before our request is completed. + */ + BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) * + sizeof(struct guc_wq_item) * + I915_NUM_ENGINES > GUC_WQ_SIZE); + + GEM_BUG_ON(!guc->execbuf_client); + + err = guc_clients_enable(guc); + if (err) + return err; + + /* Take over from manual control of ELSP (execlists) */ + guc_interrupts_capture(gt); + + for_each_engine(engine, gt, id) { + engine->set_default_submission = guc_set_default_submission; + engine->set_default_submission(engine); + } + + return 0; +} + +void intel_guc_submission_disable(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + GEM_BUG_ON(gt->awake); /* GT should be parked first */ + + guc_interrupts_release(gt); + guc_clients_disable(guc); +} + +static bool __guc_submission_support(struct intel_guc *guc) +{ + /* XXX: GuC submission is unavailable for now */ + return false; + + if (!intel_guc_is_supported(guc)) + return false; + + return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION; +} + +void intel_guc_submission_init_early(struct intel_guc *guc) +{ + guc->submission_supported = __guc_submission_support(guc); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_guc.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h new file mode 100644 index 000000000000..54d716828352 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#ifndef _INTEL_GUC_SUBMISSION_H_ +#define _INTEL_GUC_SUBMISSION_H_ + +#include <linux/spinlock.h> + +#include "gt/intel_engine_types.h" + +#include "i915_gem.h" +#include "i915_selftest.h" + +struct drm_i915_private; + +/* + * This structure primarily describes the GEM object shared with the GuC. + * The specs sometimes refer to this object as a "GuC context", but we use + * the term "client" to avoid confusion with hardware contexts. 
This + * GEM object is held for the entire lifetime of our interaction with + * the GuC, being allocated before the GuC is loaded with its firmware. + * Because there's no way to update the address used by the GuC after + * initialisation, the shared object must stay pinned into the GGTT as + * long as the GuC is in use. We also keep the first page (only) mapped + * into kernel address space, as it includes shared data that must be + * updated on every request submission. + * + * The single GEM object described here is actually made up of several + * separate areas, as far as the GuC is concerned. The first page (kept + * kmap'd) includes the "process descriptor" which holds sequence data for + * the doorbell, and one cacheline which actually *is* the doorbell; a + * write to this will "ring the doorbell" (i.e. send an interrupt to the + * GuC). The subsequent pages of the client object constitute the work + * queue (a circular array of work items), again described in the process + * descriptor. Work queue pages are mapped momentarily as required. + */ +struct intel_guc_client { + struct i915_vma *vma; + void *vaddr; + struct intel_guc *guc; + + /* bitmap of (host) engine ids */ + u32 priority; + u32 stage_id; + u32 proc_desc_offset; + + u16 doorbell_id; + unsigned long doorbell_offset; + + /* Protects GuC client's WQ access */ + spinlock_t wq_lock; + + /* For testing purposes, use nop WQ items instead of real ones */ + I915_SELFTEST_DECLARE(bool use_nop_wqi); +}; + +void intel_guc_submission_init_early(struct intel_guc *guc); +int intel_guc_submission_init(struct intel_guc *guc); +int intel_guc_submission_enable(struct intel_guc *guc); +void intel_guc_submission_disable(struct intel_guc *guc); +void intel_guc_submission_fini(struct intel_guc *guc); +int intel_guc_preempt_work_create(struct intel_guc *guc); +void intel_guc_preempt_work_destroy(struct intel_guc *guc); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c new file mode 100644 index 000000000000..32a069841c14 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2016-2019 Intel Corporation + */ + +#include <linux/types.h> + +#include "gt/intel_gt.h" +#include "intel_huc.h" +#include "i915_drv.h" + +/** + * DOC: HuC + * + * The HuC is a dedicated microcontroller for usage in media HEVC (High + * Efficiency Video Coding) operations. Userspace can directly use the firmware + * capabilities by adding HuC specific commands to batch buffers. + * + * The kernel driver is only responsible for loading the HuC firmware and + * triggering its security authentication, which is performed by the GuC. For + * The GuC to correctly perform the authentication, the HuC binary must be + * loaded before the GuC one. Loading the HuC is optional; however, not using + * the HuC might negatively impact power usage and/or performance of media + * workloads, depending on the use-cases. + * + * See https://github.com/intel/media-driver for the latest details on HuC + * functionality. + */ + +/** + * DOC: HuC Memory Management + * + * Similarly to the GuC, the HuC can't do any memory allocations on its own, + * with the difference being that the allocations for HuC usage are handled by + * the userspace driver instead of the kernel one. The HuC accesses the memory + * via the PPGTT belonging to the context loaded on the VCS executing the + * HuC-specific commands. 
+ */ + +void intel_huc_init_early(struct intel_huc *huc) +{ + struct drm_i915_private *i915 = huc_to_gt(huc)->i915; + + intel_huc_fw_init_early(huc); + + if (INTEL_GEN(i915) >= 11) { + huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO; + huc->status.mask = HUC_LOAD_SUCCESSFUL; + huc->status.value = HUC_LOAD_SUCCESSFUL; + } else { + huc->status.reg = HUC_STATUS2; + huc->status.mask = HUC_FW_VERIFIED; + huc->status.value = HUC_FW_VERIFIED; + } +} + +static int intel_huc_rsa_data_create(struct intel_huc *huc) +{ + struct intel_gt *gt = huc_to_gt(huc); + struct intel_guc *guc = >->uc.guc; + struct i915_vma *vma; + size_t copied; + void *vaddr; + int err; + + err = i915_inject_probe_error(gt->i915, -ENXIO); + if (err) + return err; + + /* + * HuC firmware will sit above GUC_GGTT_TOP and will not map + * through GTT. Unfortunately, this means GuC cannot perform + * the HuC auth. as the rsa offset now falls within the GuC + * inaccessible range. We resort to perma-pinning an additional + * vma within the accessible range that only contains the rsa + * signature. The GuC can use this extra pinning to perform + * the authentication since its GGTT offset will be GuC + * accessible. + */ + GEM_BUG_ON(huc->fw.rsa_size > PAGE_SIZE); + vma = intel_guc_allocate_vma(guc, PAGE_SIZE); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + i915_vma_unpin_and_release(&vma, 0); + return PTR_ERR(vaddr); + } + + copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size); + GEM_BUG_ON(copied < huc->fw.rsa_size); + + i915_gem_object_unpin_map(vma->obj); + + huc->rsa_data = vma; + + return 0; +} + +static void intel_huc_rsa_data_destroy(struct intel_huc *huc) +{ + i915_vma_unpin_and_release(&huc->rsa_data, 0); +} + +int intel_huc_init(struct intel_huc *huc) +{ + struct drm_i915_private *i915 = huc_to_gt(huc)->i915; + int err; + + err = intel_uc_fw_init(&huc->fw); + if (err) + goto out; + + /* + * HuC firmware image is outside GuC accessible range. + * Copy the RSA signature out of the image into + * a perma-pinned region set aside for it + */ + err = intel_huc_rsa_data_create(huc); + if (err) + goto out_fini; + + return 0; + +out_fini: + intel_uc_fw_fini(&huc->fw); +out: + intel_uc_fw_cleanup_fetch(&huc->fw); + DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "failed with %d\n", err); + return err; +} + +void intel_huc_fini(struct intel_huc *huc) +{ + if (!intel_uc_fw_is_available(&huc->fw)) + return; + + intel_huc_rsa_data_destroy(huc); + intel_uc_fw_fini(&huc->fw); +} + +/** + * intel_huc_auth() - Authenticate HuC uCode + * @huc: intel_huc structure + * + * Called after HuC and GuC firmware loading during intel_uc_init_hw(). + * + * This function invokes the GuC action to authenticate the HuC firmware, + * passing the offset of the RSA signature to intel_guc_auth_huc(). It then + * waits for up to 50ms for firmware verification ACK. 
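+ *
+ * Return: 0 on success, -ENOEXEC if the HuC firmware is not loaded, or
+ * another negative errno if the authentication fails.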
+ */ +int intel_huc_auth(struct intel_huc *huc) +{ + struct intel_gt *gt = huc_to_gt(huc); + struct intel_guc *guc = >->uc.guc; + int ret; + + GEM_BUG_ON(intel_huc_is_authenticated(huc)); + + if (!intel_uc_fw_is_loaded(&huc->fw)) + return -ENOEXEC; + + ret = i915_inject_probe_error(gt->i915, -ENXIO); + if (ret) + goto fail; + + ret = intel_guc_auth_huc(guc, + intel_guc_ggtt_offset(guc, huc->rsa_data)); + if (ret) { + DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret); + goto fail; + } + + /* Check authentication status, it should be done by now */ + ret = __intel_wait_for_register(gt->uncore, + huc->status.reg, + huc->status.mask, + huc->status.value, + 2, 50, NULL); + if (ret) { + DRM_ERROR("HuC: Firmware not verified %d\n", ret); + goto fail; + } + + intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING); + return 0; + +fail: + i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret); + intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_FAIL); + return ret; +} + +/** + * intel_huc_check_status() - check HuC status + * @huc: intel_huc structure + * + * This function reads status register to verify if HuC + * firmware was successfully loaded. + * + * Returns: 1 if HuC firmware is loaded and verified, + * 0 if HuC firmware is not loaded and -ENODEV if HuC + * is not present on this platform. + */ +int intel_huc_check_status(struct intel_huc *huc) +{ + struct intel_gt *gt = huc_to_gt(huc); + intel_wakeref_t wakeref; + u32 status = 0; + + if (!intel_huc_is_supported(huc)) + return -ENODEV; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) + status = intel_uncore_read(gt->uncore, huc->status.reg); + + return (status & huc->status.mask) == huc->status.value; +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h new file mode 100644 index 000000000000..644c059fe01d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#ifndef _INTEL_HUC_H_ +#define _INTEL_HUC_H_ + +#include "i915_reg.h" +#include "intel_uc_fw.h" +#include "intel_huc_fw.h" + +struct intel_huc { + /* Generic uC firmware management */ + struct intel_uc_fw fw; + + /* HuC-specific additions */ + struct i915_vma *rsa_data; + + struct { + i915_reg_t reg; + u32 mask; + u32 value; + } status; +}; + +void intel_huc_init_early(struct intel_huc *huc); +int intel_huc_init(struct intel_huc *huc); +void intel_huc_fini(struct intel_huc *huc); +int intel_huc_auth(struct intel_huc *huc); +int intel_huc_check_status(struct intel_huc *huc); + +static inline int intel_huc_sanitize(struct intel_huc *huc) +{ + intel_uc_fw_sanitize(&huc->fw); + return 0; +} + +static inline bool intel_huc_is_supported(struct intel_huc *huc) +{ + return intel_uc_fw_is_supported(&huc->fw); +} + +static inline bool intel_huc_is_enabled(struct intel_huc *huc) +{ + return intel_uc_fw_is_enabled(&huc->fw); +} + +static inline bool intel_huc_is_authenticated(struct intel_huc *huc) +{ + return intel_uc_fw_is_running(&huc->fw); +} + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c new file mode 100644 index 000000000000..d654340d4d03 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#include "gt/intel_gt.h" +#include "intel_huc_fw.h" +#include "i915_drv.h" + +/** + * intel_huc_fw_init_early() - initializes HuC firmware 
struct + * @huc: intel_huc struct + * + * On platforms with HuC selects firmware for uploading + */ +void intel_huc_fw_init_early(struct intel_huc *huc) +{ + struct intel_gt *gt = huc_to_gt(huc); + struct intel_uc *uc = >->uc; + struct drm_i915_private *i915 = gt->i915; + + intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, + intel_uc_uses_guc(uc), + INTEL_INFO(i915)->platform, INTEL_REVID(i915)); +} + +/** + * intel_huc_fw_upload() - load HuC uCode to device + * @huc: intel_huc structure + * + * Called from intel_uc_init_hw() during driver load, resume from sleep and + * after a GPU reset. Note that HuC must be loaded before GuC. + * + * The firmware image should have already been fetched into memory, so only + * check that fetch succeeded, and then transfer the image to the h/w. + * + * Return: non-zero code on error + */ +int intel_huc_fw_upload(struct intel_huc *huc) +{ + /* HW doesn't look at destination address for HuC, so set it to 0 */ + return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), 0, HUC_UKERNEL); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h new file mode 100644 index 000000000000..b791269ce923 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#ifndef _INTEL_HUC_FW_H_ +#define _INTEL_HUC_FW_H_ + +struct intel_huc; + +void intel_huc_fw_init_early(struct intel_huc *huc); +int intel_huc_fw_upload(struct intel_huc *huc); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c new file mode 100644 index 000000000000..629b19377a29 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -0,0 +1,633 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2016-2019 Intel Corporation + */ + +#include "gt/intel_gt.h" +#include "gt/intel_reset.h" +#include "intel_guc.h" +#include "intel_guc_ads.h" +#include "intel_guc_submission.h" +#include "intel_uc.h" + +#include "i915_drv.h" + +/* Reset GuC providing us with fresh state for both GuC and HuC. 
+ */ +static int __intel_uc_reset_hw(struct intel_uc *uc) +{ + struct intel_gt *gt = uc_to_gt(uc); + int ret; + u32 guc_status; + + ret = i915_inject_probe_error(gt->i915, -ENXIO); + if (ret) + return ret; + + ret = intel_reset_guc(gt); + if (ret) { + DRM_ERROR("Failed to reset GuC, ret = %d\n", ret); + return ret; + } + + guc_status = intel_uncore_read(gt->uncore, GUC_STATUS); + WARN(!(guc_status & GS_MIA_IN_RESET), + "GuC status: 0x%x, MIA core expected to be in reset\n", + guc_status); + + return ret; +} + +static void __confirm_options(struct intel_uc *uc) +{ + struct drm_i915_private *i915 = uc_to_gt(uc)->i915; + + DRM_DEV_DEBUG_DRIVER(i915->drm.dev, + "enable_guc=%d (guc:%s submission:%s huc:%s)\n", + i915_modparams.enable_guc, + yesno(intel_uc_uses_guc(uc)), + yesno(intel_uc_uses_guc_submission(uc)), + yesno(intel_uc_uses_huc(uc))); + + if (i915_modparams.enable_guc == -1) + return; + + if (i915_modparams.enable_guc == 0) { + GEM_BUG_ON(intel_uc_uses_guc(uc)); + GEM_BUG_ON(intel_uc_uses_guc_submission(uc)); + GEM_BUG_ON(intel_uc_uses_huc(uc)); + return; + } + + if (!intel_uc_supports_guc(uc)) + dev_info(i915->drm.dev, + "Incompatible option enable_guc=%d - %s\n", + i915_modparams.enable_guc, "GuC is not supported!"); + + if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC && + !intel_uc_supports_huc(uc)) + dev_info(i915->drm.dev, + "Incompatible option enable_guc=%d - %s\n", + i915_modparams.enable_guc, "HuC is not supported!"); + + if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION && + !intel_uc_supports_guc_submission(uc)) + dev_info(i915->drm.dev, + "Incompatible option enable_guc=%d - %s\n", + i915_modparams.enable_guc, "GuC submission is N/A"); + + if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION | + ENABLE_GUC_LOAD_HUC)) + dev_info(i915->drm.dev, + "Incompatible option enable_guc=%d - %s\n", + i915_modparams.enable_guc, "undocumented flag"); +} + +void intel_uc_init_early(struct intel_uc *uc) +{ + intel_guc_init_early(&uc->guc); + intel_huc_init_early(&uc->huc); + + __confirm_options(uc); +} + +void intel_uc_driver_late_release(struct intel_uc *uc) +{ +} + +/** + * intel_uc_init_mmio - setup uC MMIO access + * @uc: the intel_uc structure + * + * Setup minimal state necessary for MMIO accesses later in the + * initialization sequence. + */ +void intel_uc_init_mmio(struct intel_uc *uc) +{ + intel_guc_init_send_regs(&uc->guc); +} + +static void __uc_capture_load_err_log(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + + if (guc->log.vma && !uc->load_err_log) + uc->load_err_log = i915_gem_object_get(guc->log.vma->obj); +} + +static void __uc_free_load_err_log(struct intel_uc *uc) +{ + struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log); + + if (log) + i915_gem_object_put(log); +} + +/* + * Events triggered while CT buffers are disabled are logged in the SCRATCH_15 + * register using the same bits used in the CT message payload. Since our + * communication channel with guc is turned off at this point, we can save the + * message and handle it after we turn it back on. 
+ */ +static void guc_clear_mmio_msg(struct intel_guc *guc) +{ + intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0); +} + +static void guc_get_mmio_msg(struct intel_guc *guc) +{ + u32 val; + + spin_lock_irq(&guc->irq_lock); + + val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15)); + guc->mmio_msg |= val & guc->msg_enabled_mask; + + /* + * clear all events, including the ones we're not currently servicing, + * to make sure we don't try to process a stale message if we enable + * handling of more events later. + */ + guc_clear_mmio_msg(guc); + + spin_unlock_irq(&guc->irq_lock); +} + +static void guc_handle_mmio_msg(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + + /* we need communication to be enabled to reply to GuC */ + GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop); + + if (!guc->mmio_msg) + return; + + spin_lock_irq(&i915->irq_lock); + intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1); + spin_unlock_irq(&i915->irq_lock); + + guc->mmio_msg = 0; +} + +static void guc_reset_interrupts(struct intel_guc *guc) +{ + guc->interrupts.reset(guc); +} + +static void guc_enable_interrupts(struct intel_guc *guc) +{ + guc->interrupts.enable(guc); +} + +static void guc_disable_interrupts(struct intel_guc *guc) +{ + guc->interrupts.disable(guc); +} + +static inline bool guc_communication_enabled(struct intel_guc *guc) +{ + return guc->send != intel_guc_send_nop; +} + +static int guc_enable_communication(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + int ret; + + GEM_BUG_ON(guc_communication_enabled(guc)); + + ret = i915_inject_probe_error(i915, -ENXIO); + if (ret) + return ret; + + ret = intel_guc_ct_enable(&guc->ct); + if (ret) + return ret; + + guc->send = intel_guc_send_ct; + guc->handler = intel_guc_to_host_event_handler_ct; + + /* check for mmio messages received before/during the CT enable */ + guc_get_mmio_msg(guc); + guc_handle_mmio_msg(guc); + + guc_enable_interrupts(guc); + + /* check for CT messages received before we enabled interrupts */ + spin_lock_irq(&i915->irq_lock); + intel_guc_to_host_event_handler_ct(guc); + spin_unlock_irq(&i915->irq_lock); + + DRM_INFO("GuC communication enabled\n"); + + return 0; +} + +static void __guc_stop_communication(struct intel_guc *guc) +{ + /* + * Events generated during or after CT disable are logged by guc in + * via mmio. Make sure the register is clear before disabling CT since + * all events we cared about have already been processed via CT. + */ + guc_clear_mmio_msg(guc); + + guc_disable_interrupts(guc); + + guc->send = intel_guc_send_nop; + guc->handler = intel_guc_to_host_event_handler_nop; +} + +static void guc_stop_communication(struct intel_guc *guc) +{ + intel_guc_ct_stop(&guc->ct); + + __guc_stop_communication(guc); + + DRM_INFO("GuC communication stopped\n"); +} + +static void guc_disable_communication(struct intel_guc *guc) +{ + __guc_stop_communication(guc); + + intel_guc_ct_disable(&guc->ct); + + /* + * Check for messages received during/after the CT disable. We do not + * expect any messages to have arrived via CT between the interrupt + * disable and the CT disable because GuC should've been idle until we + * triggered the CT disable protocol. 
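+ *
+ * Any enabled event bits found in SOFT_SCRATCH(15) here are stashed in
+ * guc->mmio_msg and handled the next time communication is enabled.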
+ */ + guc_get_mmio_msg(guc); + + DRM_INFO("GuC communication disabled\n"); +} + +void intel_uc_fetch_firmwares(struct intel_uc *uc) +{ + struct drm_i915_private *i915 = uc_to_gt(uc)->i915; + int err; + + if (!intel_uc_uses_guc(uc)) + return; + + err = intel_uc_fw_fetch(&uc->guc.fw, i915); + if (err) + return; + + if (intel_uc_uses_huc(uc)) + intel_uc_fw_fetch(&uc->huc.fw, i915); +} + +void intel_uc_cleanup_firmwares(struct intel_uc *uc) +{ + if (!intel_uc_uses_guc(uc)) + return; + + if (intel_uc_uses_huc(uc)) + intel_uc_fw_cleanup_fetch(&uc->huc.fw); + + intel_uc_fw_cleanup_fetch(&uc->guc.fw); +} + +void intel_uc_init(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + struct intel_huc *huc = &uc->huc; + int ret; + + if (!intel_uc_uses_guc(uc)) + return; + + /* XXX: GuC submission is unavailable for now */ + GEM_BUG_ON(intel_uc_supports_guc_submission(uc)); + + ret = intel_guc_init(guc); + if (ret) { + intel_uc_fw_cleanup_fetch(&huc->fw); + return; + } + + if (intel_uc_uses_huc(uc)) + intel_huc_init(huc); +} + +void intel_uc_fini(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + + if (!intel_uc_uses_guc(uc)) + return; + + if (intel_uc_uses_huc(uc)) + intel_huc_fini(&uc->huc); + + intel_guc_fini(guc); + + __uc_free_load_err_log(uc); +} + +static int __uc_sanitize(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + struct intel_huc *huc = &uc->huc; + + GEM_BUG_ON(!intel_uc_supports_guc(uc)); + + intel_huc_sanitize(huc); + intel_guc_sanitize(guc); + + return __intel_uc_reset_hw(uc); +} + +void intel_uc_sanitize(struct intel_uc *uc) +{ + if (!intel_uc_supports_guc(uc)) + return; + + __uc_sanitize(uc); +} + +/* Initialize and verify the uC regs related to uC positioning in WOPCM */ +static int uc_init_wopcm(struct intel_uc *uc) +{ + struct intel_gt *gt = uc_to_gt(uc); + struct intel_uncore *uncore = gt->uncore; + u32 base = intel_wopcm_guc_base(>->i915->wopcm); + u32 size = intel_wopcm_guc_size(>->i915->wopcm); + u32 huc_agent = intel_uc_uses_huc(uc) ? 
HUC_LOADING_AGENT_GUC : 0; + u32 mask; + int err; + + if (unlikely(!base || !size)) { + i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n"); + return -E2BIG; + } + + GEM_BUG_ON(!intel_uc_supports_guc(uc)); + GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK)); + GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK); + GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK)); + GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK); + + err = i915_inject_probe_error(gt->i915, -ENXIO); + if (err) + return err; + + mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED; + err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask, + size | GUC_WOPCM_SIZE_LOCKED); + if (err) + goto err_out; + + mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent; + err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET, + base | huc_agent, mask, + base | huc_agent | + GUC_WOPCM_OFFSET_VALID); + if (err) + goto err_out; + + return 0; + +err_out: + i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n"); + i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET", + i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET), + intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET)); + i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE", + i915_mmio_reg_offset(GUC_WOPCM_SIZE), + intel_uncore_read(uncore, GUC_WOPCM_SIZE)); + + return err; +} + +static bool uc_is_wopcm_locked(struct intel_uc *uc) +{ + struct intel_gt *gt = uc_to_gt(uc); + struct intel_uncore *uncore = gt->uncore; + + return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) || + (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID); +} + +int intel_uc_init_hw(struct intel_uc *uc) +{ + struct drm_i915_private *i915 = uc_to_gt(uc)->i915; + struct intel_guc *guc = &uc->guc; + struct intel_huc *huc = &uc->huc; + int ret, attempts; + + if (!intel_uc_supports_guc(uc)) + return 0; + + /* + * We can silently continue without GuC only if it was never enabled + * before on this system after reboot, otherwise we risk GPU hangs. + * To check if GuC was loaded before we look at WOPCM registers. + */ + if (!intel_uc_uses_guc(uc) && !uc_is_wopcm_locked(uc)) + return 0; + + if (!intel_uc_fw_is_available(&guc->fw)) { + ret = uc_is_wopcm_locked(uc) || + intel_uc_fw_is_overridden(&guc->fw) || + intel_uc_supports_guc_submission(uc) ? + intel_uc_fw_status_to_error(guc->fw.status) : 0; + goto err_out; + } + + ret = uc_init_wopcm(uc); + if (ret) + goto err_out; + + guc_reset_interrupts(guc); + + /* WaEnableuKernelHeaderValidFix:skl */ + /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */ + if (IS_GEN(i915, 9)) + attempts = 3; + else + attempts = 1; + + while (attempts--) { + /* + * Always reset the GuC just before (re)loading, so + * that the state and timing are fairly predictable + */ + ret = __uc_sanitize(uc); + if (ret) + goto err_out; + + intel_huc_fw_upload(huc); + intel_guc_ads_reset(guc); + intel_guc_write_params(guc); + ret = intel_guc_fw_upload(guc); + if (ret == 0) + break; + + DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and " + "retry %d more time(s)\n", ret, attempts); + } + + /* Did we succeded or run out of retries? 
*/ + if (ret) + goto err_log_capture; + + ret = guc_enable_communication(guc); + if (ret) + goto err_log_capture; + + intel_huc_auth(huc); + + ret = intel_guc_sample_forcewake(guc); + if (ret) + goto err_communication; + + if (intel_uc_supports_guc_submission(uc)) { + ret = intel_guc_submission_enable(guc); + if (ret) + goto err_communication; + } + + dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n", + intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path, + guc->fw.major_ver_found, guc->fw.minor_ver_found, + "submission", + enableddisabled(intel_uc_supports_guc_submission(uc))); + + if (intel_uc_uses_huc(uc)) { + dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n", + intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC), + huc->fw.path, + huc->fw.major_ver_found, huc->fw.minor_ver_found, + "authenticated", + yesno(intel_huc_is_authenticated(huc))); + } + + return 0; + + /* + * We've failed to load the firmware :( + */ +err_communication: + guc_disable_communication(guc); +err_log_capture: + __uc_capture_load_err_log(uc); +err_out: + __uc_sanitize(uc); + + if (!ret) { + dev_notice(i915->drm.dev, "GuC is uninitialized\n"); + /* We want to run without GuC submission */ + return 0; + } + + i915_probe_error(i915, "GuC initialization failed %d\n", ret); + + /* We want to keep KMS alive */ + return -EIO; +} + +void intel_uc_fini_hw(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + + if (!intel_guc_is_running(guc)) + return; + + if (intel_uc_supports_guc_submission(uc)) + intel_guc_submission_disable(guc); + + if (guc_communication_enabled(guc)) + guc_disable_communication(guc); + + __uc_sanitize(uc); +} + +/** + * intel_uc_reset_prepare - Prepare for reset + * @uc: the intel_uc structure + * + * Preparing for full gpu reset. + */ +void intel_uc_reset_prepare(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + + if (!intel_guc_is_running(guc)) + return; + + guc_stop_communication(guc); + __uc_sanitize(uc); +} + +void intel_uc_runtime_suspend(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + int err; + + if (!intel_guc_is_running(guc)) + return; + + err = intel_guc_suspend(guc); + if (err) + DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err); + + guc_disable_communication(guc); +} + +void intel_uc_suspend(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + intel_wakeref_t wakeref; + + if (!intel_guc_is_running(guc)) + return; + + with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref) + intel_uc_runtime_suspend(uc); +} + +static int __uc_resume(struct intel_uc *uc, bool enable_communication) +{ + struct intel_guc *guc = &uc->guc; + int err; + + if (!intel_guc_is_running(guc)) + return 0; + + /* Make sure we enable communication if and only if it's disabled */ + GEM_BUG_ON(enable_communication == guc_communication_enabled(guc)); + + if (enable_communication) + guc_enable_communication(guc); + + err = intel_guc_resume(guc); + if (err) { + DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err); + return err; + } + + return 0; +} + +int intel_uc_resume(struct intel_uc *uc) +{ + /* + * When coming out of S3/S4 we sanitize and re-init the HW, so + * communication is already re-enabled at this point. + */ + return __uc_resume(uc, false); +} + +int intel_uc_runtime_resume(struct intel_uc *uc) +{ + /* + * During runtime resume we don't sanitize, so we need to re-init + * communication as well. 
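+ * (That is, __uc_resume() is asked to re-enable CT communication here,
+ * unlike in the system resume path above.)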
+ */ + return __uc_resume(uc, true); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h new file mode 100644 index 000000000000..527995c21196 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#ifndef _INTEL_UC_H_ +#define _INTEL_UC_H_ + +#include "intel_guc.h" +#include "intel_huc.h" +#include "i915_params.h" + +struct intel_uc { + struct intel_guc guc; + struct intel_huc huc; + + /* Snapshot of GuC log from last failed load */ + struct drm_i915_gem_object *load_err_log; +}; + +void intel_uc_init_early(struct intel_uc *uc); +void intel_uc_driver_late_release(struct intel_uc *uc); +void intel_uc_init_mmio(struct intel_uc *uc); +void intel_uc_fetch_firmwares(struct intel_uc *uc); +void intel_uc_cleanup_firmwares(struct intel_uc *uc); +void intel_uc_sanitize(struct intel_uc *uc); +void intel_uc_init(struct intel_uc *uc); +int intel_uc_init_hw(struct intel_uc *uc); +void intel_uc_fini_hw(struct intel_uc *uc); +void intel_uc_fini(struct intel_uc *uc); +void intel_uc_reset_prepare(struct intel_uc *uc); +void intel_uc_suspend(struct intel_uc *uc); +void intel_uc_runtime_suspend(struct intel_uc *uc); +int intel_uc_resume(struct intel_uc *uc); +int intel_uc_runtime_resume(struct intel_uc *uc); + +static inline bool intel_uc_supports_guc(struct intel_uc *uc) +{ + return intel_guc_is_supported(&uc->guc); +} + +static inline bool intel_uc_uses_guc(struct intel_uc *uc) +{ + return intel_guc_is_enabled(&uc->guc); +} + +static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc) +{ + return intel_guc_is_submission_supported(&uc->guc); +} + +static inline bool intel_uc_uses_guc_submission(struct intel_uc *uc) +{ + return intel_guc_is_submission_supported(&uc->guc); +} + +static inline bool intel_uc_supports_huc(struct intel_uc *uc) +{ + return intel_uc_supports_guc(uc); +} + +static inline bool intel_uc_uses_huc(struct intel_uc *uc) +{ + return intel_huc_is_enabled(&uc->huc); +} + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c new file mode 100644 index 000000000000..66a30ab7044a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -0,0 +1,610 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2016-2019 Intel Corporation + */ + +#include <linux/bitfield.h> +#include <linux/firmware.h> +#include <drm/drm_print.h> + +#include "intel_uc_fw.h" +#include "intel_uc_fw_abi.h" +#include "i915_drv.h" + +#ifdef CONFIG_DRM_I915_DEBUG_GUC +static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw) +{ + GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED); + if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) + return container_of(uc_fw, struct intel_gt, uc.guc.fw); + + GEM_BUG_ON(uc_fw->type != INTEL_UC_FW_TYPE_HUC); + return container_of(uc_fw, struct intel_gt, uc.huc.fw); +} + +void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, + enum intel_uc_fw_status status) +{ + uc_fw->__status = status; + DRM_DEV_DEBUG_DRIVER(__uc_fw_to_gt(uc_fw)->i915->drm.dev, + "%s firmware -> %s\n", + intel_uc_fw_type_repr(uc_fw->type), + status == INTEL_UC_FIRMWARE_SELECTED ? + uc_fw->path : intel_uc_fw_status_repr(status)); +} +#endif + +/* + * List of required GuC and HuC binaries per-platform. + * Must be ordered based on platform + revid, from newer to older. + * + * TGL 35.2 is interface-compatible with 33.0 for previous Gens. 
The deltas + * between 33.0 and 35.2 are only related to new additions to support new Gen12 + * features. + */ +#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ + fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 3)) \ + fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \ + fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \ + fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \ + fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \ + fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 4, 0, 0)) \ + fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \ + fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 2, 0, 0)) \ + fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 2, 0, 0)) + +#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \ + "i915/" \ + __stringify(prefix_) name_ \ + __stringify(major_) "." \ + __stringify(minor_) "." \ + __stringify(patch_) ".bin" + +#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \ + __MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_) + +#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \ + __MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_) + +/* All blobs need to be declared via MODULE_FIRMWARE() */ +#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \ + MODULE_FIRMWARE(guc_); \ + MODULE_FIRMWARE(huc_); + +INTEL_UC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH, MAKE_HUC_FW_PATH) + +/* The below structs and macros are used to iterate across the list of blobs */ +struct __packed uc_fw_blob { + u8 major; + u8 minor; + const char *path; +}; + +#define UC_FW_BLOB(major_, minor_, path_) \ + { .major = major_, .minor = minor_, .path = path_ } + +#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \ + UC_FW_BLOB(major_, minor_, \ + MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_)) + +#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \ + UC_FW_BLOB(major_, minor_, \ + MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_)) + +struct __packed uc_fw_platform_requirement { + enum intel_platform p; + u8 rev; /* first platform rev using this FW */ + const struct uc_fw_blob blobs[INTEL_UC_FW_NUM_TYPES]; +}; + +#define MAKE_FW_LIST(platform_, revid_, guc_, huc_) \ +{ \ + .p = INTEL_##platform_, \ + .rev = revid_, \ + .blobs[INTEL_UC_FW_TYPE_GUC] = guc_, \ + .blobs[INTEL_UC_FW_TYPE_HUC] = huc_, \ +}, + +static void +__uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev) +{ + static const struct uc_fw_platform_requirement fw_blobs[] = { + INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB) + }; + int i; + + for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) { + if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) { + const struct uc_fw_blob *blob = + &fw_blobs[i].blobs[uc_fw->type]; + uc_fw->path = blob->path; + uc_fw->major_ver_wanted = blob->major; + uc_fw->minor_ver_wanted = blob->minor; + break; + } + } + + /* make sure the list is ordered as expected */ + if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) { + for (i = 1; i < ARRAY_SIZE(fw_blobs); i++) { + if (fw_blobs[i].p < fw_blobs[i - 1].p) + continue; + + if (fw_blobs[i].p == fw_blobs[i - 1].p && + fw_blobs[i].rev < fw_blobs[i - 1].rev) + continue; + + pr_err("invalid FW blob order: %s r%u comes before %s r%u\n", + intel_platform_name(fw_blobs[i - 1].p), + fw_blobs[i - 1].rev, + intel_platform_name(fw_blobs[i].p), + fw_blobs[i].rev); + + uc_fw->path = NULL; + } + } + + /* We don't want to 
enable GuC/HuC on pre-Gen11 by default */ + if (i915_modparams.enable_guc == -1 && p < INTEL_ICELAKE) + uc_fw->path = NULL; +} + +static const char *__override_guc_firmware_path(void) +{ + if (i915_modparams.enable_guc & (ENABLE_GUC_SUBMISSION | + ENABLE_GUC_LOAD_HUC)) + return i915_modparams.guc_firmware_path; + return ""; +} + +static const char *__override_huc_firmware_path(void) +{ + if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC) + return i915_modparams.huc_firmware_path; + return ""; +} + +static void __uc_fw_user_override(struct intel_uc_fw *uc_fw) +{ + const char *path = NULL; + + switch (uc_fw->type) { + case INTEL_UC_FW_TYPE_GUC: + path = __override_guc_firmware_path(); + break; + case INTEL_UC_FW_TYPE_HUC: + path = __override_huc_firmware_path(); + break; + } + + if (unlikely(path)) { + uc_fw->path = path; + uc_fw->user_overridden = true; + } +} + +/** + * intel_uc_fw_init_early - initialize the uC object and select the firmware + * @uc_fw: uC firmware + * @type: type of uC + * @supported: is uC support possible + * @platform: platform identifier + * @rev: hardware revision + * + * Initialize the state of our uC object and relevant tracking and select the + * firmware to fetch and load. + */ +void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, + enum intel_uc_fw_type type, bool supported, + enum intel_platform platform, u8 rev) +{ + /* + * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status + * before we're looked at the HW caps to see if we have uc support + */ + BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED); + GEM_BUG_ON(uc_fw->status); + GEM_BUG_ON(uc_fw->path); + + uc_fw->type = type; + + if (supported) { + __uc_fw_auto_select(uc_fw, platform, rev); + __uc_fw_user_override(uc_fw); + } + + intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ? + INTEL_UC_FIRMWARE_SELECTED : + INTEL_UC_FIRMWARE_DISABLED : + INTEL_UC_FIRMWARE_NOT_SUPPORTED); +} + +static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, + struct drm_i915_private *i915, + int e) +{ + bool user = e == -EINVAL; + + if (i915_inject_probe_error(i915, e)) { + /* non-existing blob */ + uc_fw->path = "<invalid>"; + uc_fw->user_overridden = user; + } else if (i915_inject_probe_error(i915, e)) { + /* require next major version */ + uc_fw->major_ver_wanted += 1; + uc_fw->minor_ver_wanted = 0; + uc_fw->user_overridden = user; + } else if (i915_inject_probe_error(i915, e)) { + /* require next minor version */ + uc_fw->minor_ver_wanted += 1; + uc_fw->user_overridden = user; + } else if (uc_fw->major_ver_wanted && + i915_inject_probe_error(i915, e)) { + /* require prev major version */ + uc_fw->major_ver_wanted -= 1; + uc_fw->minor_ver_wanted = 0; + uc_fw->user_overridden = user; + } else if (uc_fw->minor_ver_wanted && + i915_inject_probe_error(i915, e)) { + /* require prev minor version - hey, this should work! */ + uc_fw->minor_ver_wanted -= 1; + uc_fw->user_overridden = user; + } else if (user && i915_inject_probe_error(i915, e)) { + /* officially unsupported platform */ + uc_fw->major_ver_wanted = 0; + uc_fw->minor_ver_wanted = 0; + uc_fw->user_overridden = true; + } +} + +/** + * intel_uc_fw_fetch - fetch uC firmware + * @uc_fw: uC firmware + * @i915: device private + * + * Fetch uC firmware into GEM obj. + * + * Return: 0 on success, a negative errno code on failure. 
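+ *
+ * A minimal probe-time sketch (hedged: guc, i915 and supported come from the
+ * caller's context, with supported standing for whatever HW-capability check
+ * the caller already performs, e.g. HAS_GT_UC(); error handling around the
+ * calls is the caller's responsibility):
+ *
+ * intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, supported,
+ *                        INTEL_INFO(i915)->platform, INTEL_REVID(i915));
+ * if (intel_uc_fw_is_enabled(&guc->fw))
+ *     err = intel_uc_fw_fetch(&guc->fw, i915);
+ *
+ * For reference, the auto-selected GuC path for e.g. Icelake expands via
+ * MAKE_GUC_FW_PATH(icl, 33, 0, 0) to "i915/icl_guc_33.0.0.bin".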
+ */ +int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) +{ + struct device *dev = i915->drm.dev; + struct drm_i915_gem_object *obj; + const struct firmware *fw = NULL; + struct uc_css_header *css; + size_t size; + int err; + + GEM_BUG_ON(!i915->wopcm.size); + GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw)); + + err = i915_inject_probe_error(i915, -ENXIO); + if (err) + return err; + + __force_fw_fetch_failures(uc_fw, i915, -EINVAL); + __force_fw_fetch_failures(uc_fw, i915, -ESTALE); + + err = request_firmware(&fw, uc_fw->path, dev); + if (err) + goto fail; + + /* Check the size of the blob before examining buffer contents */ + if (unlikely(fw->size < sizeof(struct uc_css_header))) { + dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, + fw->size, sizeof(struct uc_css_header)); + err = -ENODATA; + goto fail; + } + + css = (struct uc_css_header *)fw->data; + + /* Check integrity of size values inside CSS header */ + size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw - + css->exponent_size_dw) * sizeof(u32); + if (unlikely(size != sizeof(struct uc_css_header))) { + dev_warn(dev, + "%s firmware %s: unexpected header size: %zu != %zu\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, + fw->size, sizeof(struct uc_css_header)); + err = -EPROTO; + goto fail; + } + + /* uCode size must calculated from other sizes */ + uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32); + + /* now RSA */ + if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) { + dev_warn(dev, "%s firmware %s: unexpected key size: %u != %u\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, + css->key_size_dw, UOS_RSA_SCRATCH_COUNT); + err = -EPROTO; + goto fail; + } + uc_fw->rsa_size = css->key_size_dw * sizeof(u32); + + /* At least, it should have header, uCode and RSA. Size of all three. */ + size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size; + if (unlikely(fw->size < size)) { + dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, + fw->size, size); + err = -ENOEXEC; + goto fail; + } + + /* Sanity check whether this fw is not larger than whole WOPCM memory */ + size = __intel_uc_fw_get_upload_size(uc_fw); + if (unlikely(size >= i915->wopcm.size)) { + dev_warn(dev, "%s firmware %s: invalid size: %zu > %zu\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, + size, (size_t)i915->wopcm.size); + err = -E2BIG; + goto fail; + } + + /* Get version numbers from the CSS header */ + uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, + css->sw_version); + uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR, + css->sw_version); + + if (uc_fw->major_ver_found != uc_fw->major_ver_wanted || + uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) { + dev_notice(dev, "%s firmware %s: unexpected version: %u.%u != %u.%u\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, + uc_fw->major_ver_found, uc_fw->minor_ver_found, + uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); + if (!intel_uc_fw_is_overridden(uc_fw)) { + err = -ENOEXEC; + goto fail; + } + } + + obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto fail; + } + + uc_fw->obj = obj; + uc_fw->size = fw->size; + intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE); + + release_firmware(fw); + return 0; + +fail: + intel_uc_fw_change_status(uc_fw, err == -ENOENT ? 
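+ /*
+  * -ENOENT from request_firmware() just means the blob is absent from
+  * the filesystem; every other failure above (short file, bad CSS
+  * header, size or version mismatch) marks the blob as broken rather
+  * than missing.
+  */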
+ INTEL_UC_FIRMWARE_MISSING : + INTEL_UC_FIRMWARE_ERROR); + + dev_notice(dev, "%s firmware %s: fetch failed with error %d\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); + dev_info(dev, "%s firmware(s) can be downloaded from %s\n", + intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL); + + release_firmware(fw); /* OK even if fw is NULL */ + return err; +} + +static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt) +{ + struct drm_mm_node *node = &ggtt->uc_fw; + + GEM_BUG_ON(!drm_mm_node_allocated(node)); + GEM_BUG_ON(upper_32_bits(node->start)); + GEM_BUG_ON(upper_32_bits(node->start + node->size - 1)); + + return lower_32_bits(node->start); +} + +static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw, + struct intel_gt *gt) +{ + struct drm_i915_gem_object *obj = uc_fw->obj; + struct i915_ggtt *ggtt = gt->ggtt; + struct i915_vma dummy = { + .node.start = uc_fw_ggtt_offset(uc_fw, ggtt), + .node.size = obj->base.size, + .pages = obj->mm.pages, + .vm = &ggtt->vm, + }; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size); + + /* uc_fw->obj cache domains were not controlled across suspend */ + drm_clflush_sg(dummy.pages); + + ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0); +} + +static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw, + struct intel_gt *gt) +{ + struct drm_i915_gem_object *obj = uc_fw->obj; + struct i915_ggtt *ggtt = gt->ggtt; + u64 start = uc_fw_ggtt_offset(uc_fw, ggtt); + + ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size); +} + +static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt, + u32 wopcm_offset, u32 dma_flags) +{ + struct intel_uncore *uncore = gt->uncore; + u64 offset; + int ret; + + ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT); + if (ret) + return ret; + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + /* Set the source address for the uCode */ + offset = uc_fw_ggtt_offset(uc_fw, gt->ggtt); + GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000); + intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset)); + intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset)); + + /* Set the DMA destination */ + intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, wopcm_offset); + intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); + + /* + * Set the transfer size. The header plus uCode will be copied to WOPCM + * via DMA, excluding any other components + */ + intel_uncore_write_fw(uncore, DMA_COPY_SIZE, + sizeof(struct uc_css_header) + uc_fw->ucode_size); + + /* Start the DMA */ + intel_uncore_write_fw(uncore, DMA_CTRL, + _MASKED_BIT_ENABLE(dma_flags | START_DMA)); + + /* Wait for DMA to finish */ + ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100); + if (ret) + dev_err(gt->i915->drm.dev, "DMA for %s fw failed, DMA_CTRL=%u\n", + intel_uc_fw_type_repr(uc_fw->type), + intel_uncore_read_fw(uncore, DMA_CTRL)); + + /* Disable the bits once DMA is over */ + intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags)); + + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + + return ret; +} + +/** + * intel_uc_fw_upload - load uC firmware using custom loader + * @uc_fw: uC firmware + * @gt: the intel_gt structure + * @wopcm_offset: destination offset in wopcm + * @dma_flags: flags for flags for dma ctrl + * + * Loads uC firmware and updates internal flags. + * + * Return: 0 on success, non-zero on failure. 
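+ *
+ * Note that only the CSS header plus the uCode payload are copied to WOPCM
+ * by the DMA engine; the RSA signature is not part of the transfer and is
+ * passed along separately by the GuC/HuC specific code via
+ * intel_uc_fw_copy_rsa().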
+ */ +int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, + u32 wopcm_offset, u32 dma_flags) +{ + int err; + + /* make sure the status was cleared the last time we reset the uc */ + GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw)); + + err = i915_inject_probe_error(gt->i915, -ENOEXEC); + if (err) + return err; + + if (!intel_uc_fw_is_available(uc_fw)) + return -ENOEXEC; + + /* Call custom loader */ + intel_uc_fw_ggtt_bind(uc_fw, gt); + err = uc_fw_xfer(uc_fw, gt, wopcm_offset, dma_flags); + intel_uc_fw_ggtt_unbind(uc_fw, gt); + if (err) + goto fail; + + intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED); + return 0; + +fail: + i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, + err); + intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL); + return err; +} + +int intel_uc_fw_init(struct intel_uc_fw *uc_fw) +{ + int err; + + /* this should happen before the load! */ + GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw)); + + if (!intel_uc_fw_is_available(uc_fw)) + return -ENOEXEC; + + err = i915_gem_object_pin_pages(uc_fw->obj); + if (err) { + DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n", + intel_uc_fw_type_repr(uc_fw->type), err); + intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL); + } + + return err; +} + +void intel_uc_fw_fini(struct intel_uc_fw *uc_fw) +{ + if (!intel_uc_fw_is_available(uc_fw)) + return; + + i915_gem_object_unpin_pages(uc_fw->obj); +} + +/** + * intel_uc_fw_cleanup_fetch - cleanup uC firmware + * @uc_fw: uC firmware + * + * Cleans up uC firmware by releasing the firmware GEM obj. + */ +void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw) +{ + if (!intel_uc_fw_is_available(uc_fw)) + return; + + i915_gem_object_put(fetch_and_zero(&uc_fw->obj)); + + intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED); +} + +/** + * intel_uc_fw_copy_rsa - copy fw RSA to buffer + * + * @uc_fw: uC firmware + * @dst: dst buffer + * @max_len: max number of bytes to copy + * + * Return: number of copied bytes. + */ +size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len) +{ + struct sg_table *pages = uc_fw->obj->mm.pages; + u32 size = min_t(u32, uc_fw->rsa_size, max_len); + u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size; + + GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw)); + + return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset); +} + +/** + * intel_uc_fw_dump - dump information about uC firmware + * @uc_fw: uC firmware + * @p: the &drm_printer + * + * Pretty printer for uC firmware. 
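+ *
+ * A debugfs-style usage sketch (assuming the caller already has a seq_file
+ * m and an intel_gt gt at hand, as most i915 debugfs nodes do):
+ *
+ * struct drm_printer p = drm_seq_file_printer(m);
+ *
+ * intel_uc_fw_dump(&gt->uc.guc.fw, &p);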
+ */ +void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p) +{ + drm_printf(p, "%s firmware: %s\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); + drm_printf(p, "\tstatus: %s\n", + intel_uc_fw_status_repr(uc_fw->status)); + drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n", + uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted, + uc_fw->major_ver_found, uc_fw->minor_ver_found); + drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size); + drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h new file mode 100644 index 000000000000..7a0a5989afc9 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2019 Intel Corporation + */ + +#ifndef _INTEL_UC_FW_H_ +#define _INTEL_UC_FW_H_ + +#include <linux/types.h> +#include "intel_uc_fw_abi.h" +#include "intel_device_info.h" +#include "i915_gem.h" + +struct drm_printer; +struct drm_i915_private; +struct intel_gt; + +/* Home of GuC, HuC and DMC firmwares */ +#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915" + +/* + * +------------+---------------------------------------------------+ + * | PHASE | FIRMWARE STATUS TRANSITIONS | + * +============+===================================================+ + * | | UNINITIALIZED | + * +------------+- / | \ -+ + * | | DISABLED <--/ | \--> NOT_SUPPORTED | + * | init_early | V | + * | | SELECTED | + * +------------+- / | \ -+ + * | | MISSING <--/ | \--> ERROR | + * | fetch | | | + * | | /------> AVAILABLE <---<-----------\ | + * +------------+- \ / \ \ \ -+ + * | | FAIL <--< \--> TRANSFERRED \ | + * | upload | \ / \ / | + * | | \---------/ \--> RUNNING | + * +------------+---------------------------------------------------+ + */ + +enum intel_uc_fw_status { + INTEL_UC_FIRMWARE_NOT_SUPPORTED = -1, /* no uc HW */ + INTEL_UC_FIRMWARE_UNINITIALIZED = 0, /* used to catch checks done too early */ + INTEL_UC_FIRMWARE_DISABLED, /* disabled */ + INTEL_UC_FIRMWARE_SELECTED, /* selected the blob we want to load */ + INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */ + INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */ + INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */ + INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */ + INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */ + INTEL_UC_FIRMWARE_RUNNING /* init/auth done */ +}; + +enum intel_uc_fw_type { + INTEL_UC_FW_TYPE_GUC = 0, + INTEL_UC_FW_TYPE_HUC +}; +#define INTEL_UC_FW_NUM_TYPES 2 + +/* + * This structure encapsulates all the data needed during the process + * of fetching, caching, and loading the firmware image into the uC. + */ +struct intel_uc_fw { + enum intel_uc_fw_type type; + union { + const enum intel_uc_fw_status status; + enum intel_uc_fw_status __status; /* no accidental overwrites */ + }; + const char *path; + bool user_overridden; + size_t size; + struct drm_i915_gem_object *obj; + + /* + * The firmware build process will generate a version header file with major and + * minor version defined. The versions are built into CSS header of firmware. + * i915 kernel driver set the minimal firmware version required per platform. 
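+ *
+ * The matching policy applied in intel_uc_fw_fetch() is that the major
+ * version found in the CSS header must be exactly the one wanted, while
+ * the minor version found only needs to be at least the one wanted; a
+ * mismatch is fatal unless the blob path was explicitly overridden by
+ * the user.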
+ */ + u16 major_ver_wanted; + u16 minor_ver_wanted; + u16 major_ver_found; + u16 minor_ver_found; + + u32 rsa_size; + u32 ucode_size; +}; + +#ifdef CONFIG_DRM_I915_DEBUG_GUC +void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, + enum intel_uc_fw_status status); +#else +static inline void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, + enum intel_uc_fw_status status) +{ + uc_fw->__status = status; +} +#endif + +static inline +const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status) +{ + switch (status) { + case INTEL_UC_FIRMWARE_NOT_SUPPORTED: + return "N/A"; + case INTEL_UC_FIRMWARE_UNINITIALIZED: + return "UNINITIALIZED"; + case INTEL_UC_FIRMWARE_DISABLED: + return "DISABLED"; + case INTEL_UC_FIRMWARE_SELECTED: + return "SELECTED"; + case INTEL_UC_FIRMWARE_MISSING: + return "MISSING"; + case INTEL_UC_FIRMWARE_ERROR: + return "ERROR"; + case INTEL_UC_FIRMWARE_AVAILABLE: + return "AVAILABLE"; + case INTEL_UC_FIRMWARE_FAIL: + return "FAIL"; + case INTEL_UC_FIRMWARE_TRANSFERRED: + return "TRANSFERRED"; + case INTEL_UC_FIRMWARE_RUNNING: + return "RUNNING"; + } + return "<invalid>"; +} + +static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status) +{ + switch (status) { + case INTEL_UC_FIRMWARE_NOT_SUPPORTED: + return -ENODEV; + case INTEL_UC_FIRMWARE_UNINITIALIZED: + return -EACCES; + case INTEL_UC_FIRMWARE_DISABLED: + return -EPERM; + case INTEL_UC_FIRMWARE_MISSING: + return -ENOENT; + case INTEL_UC_FIRMWARE_ERROR: + return -ENOEXEC; + case INTEL_UC_FIRMWARE_FAIL: + return -EIO; + case INTEL_UC_FIRMWARE_SELECTED: + return -ESTALE; + case INTEL_UC_FIRMWARE_AVAILABLE: + case INTEL_UC_FIRMWARE_TRANSFERRED: + case INTEL_UC_FIRMWARE_RUNNING: + return 0; + } + return -EINVAL; +} + +static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type) +{ + switch (type) { + case INTEL_UC_FW_TYPE_GUC: + return "GuC"; + case INTEL_UC_FW_TYPE_HUC: + return "HuC"; + } + return "uC"; +} + +static inline enum intel_uc_fw_status +__intel_uc_fw_status(struct intel_uc_fw *uc_fw) +{ + /* shouldn't call this before checking hw/blob availability */ + GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED); + return uc_fw->status; +} + +static inline bool intel_uc_fw_is_supported(struct intel_uc_fw *uc_fw) +{ + return __intel_uc_fw_status(uc_fw) != INTEL_UC_FIRMWARE_NOT_SUPPORTED; +} + +static inline bool intel_uc_fw_is_enabled(struct intel_uc_fw *uc_fw) +{ + return __intel_uc_fw_status(uc_fw) > INTEL_UC_FIRMWARE_DISABLED; +} + +static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw) +{ + return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE; +} + +static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw) +{ + return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_TRANSFERRED; +} + +static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw) +{ + return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING; +} + +static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw) +{ + return uc_fw->user_overridden; +} + +static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) +{ + if (intel_uc_fw_is_loaded(uc_fw)) + intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE); +} + +static inline u32 __intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) +{ + return sizeof(struct uc_css_header) + uc_fw->ucode_size; +} + +/** + * intel_uc_fw_get_upload_size() - Get size of firmware needed to be uploaded. + * @uc_fw: uC firmware. 
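+ *
+ * intel_uc_fw_fetch() rejects any blob whose upload size would exceed the
+ * WOPCM size (-E2BIG), so an AVAILABLE firmware always reports a value
+ * smaller than i915->wopcm.size here.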
+ * + * Get the size of the firmware and header that will be uploaded to WOPCM. + * + * Return: Upload firmware size, or zero on firmware fetch failure. + */ +static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) +{ + if (!intel_uc_fw_is_available(uc_fw)) + return 0; + + return __intel_uc_fw_get_upload_size(uc_fw); +} + +void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, + enum intel_uc_fw_type type, bool supported, + enum intel_platform platform, u8 rev); +int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915); +void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw); +int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, + u32 wopcm_offset, u32 dma_flags); +int intel_uc_fw_init(struct intel_uc_fw *uc_fw); +void intel_uc_fw_fini(struct intel_uc_fw *uc_fw); +size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len); +void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h new file mode 100644 index 000000000000..029214cdedd5 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef _INTEL_UC_FW_ABI_H +#define _INTEL_UC_FW_ABI_H + +#include <linux/types.h> +#include <linux/build_bug.h> + +/** + * DOC: Firmware Layout + * + * The GuC/HuC firmware layout looks like this:: + * + * +======================================================================+ + * | Firmware blob | + * +===============+===============+============+============+============+ + * | CSS header | uCode | RSA key | modulus | exponent | + * +===============+===============+============+============+============+ + * <-header size-> <---header size continued -----------> + * <--- size -----------------------------------------------------------> + * <-key size-> + * <-mod size-> + * <-exp size-> + * + * The firmware may or may not have modulus key and exponent data. The header, + * uCode and RSA signature are must-have components that will be used by driver. + * Length of each components, which is all in dwords, can be found in header. + * In the case that modulus and exponent are not present in fw, a.k.a truncated + * image, the length value still appears in header. + * + * Driver will do some basic fw size validation based on the following rules: + * + * 1. Header, uCode and RSA are must-have components. + * 2. All firmware components, if they present, are in the sequence illustrated + * in the layout table above. + * 3. Length info of each component can be found in header, in dwords. + * 4. Modulus and exponent key are not required by driver. They may not appear + * in fw. So driver will load a truncated firmware in this case. + */ + +struct uc_css_header { + u32 module_type; + /* + * header_size includes all non-uCode bits, including css_header, rsa + * key, modulus key and exponent data. 
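+ *
+ * In other words, intel_uc_fw_fetch() expects
+ * (header_size_dw - key_size_dw - modulus_size_dw - exponent_size_dw)
+ * dwords to cover exactly this 128-byte structure and rejects any
+ * blob for which that does not hold.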
+ */ + u32 header_size_dw; + u32 header_version; + u32 module_id; + u32 module_vendor; + u32 date; +#define CSS_DATE_DAY (0xFF << 0) +#define CSS_DATE_MONTH (0xFF << 8) +#define CSS_DATE_YEAR (0xFFFF << 16) + u32 size_dw; /* uCode plus header_size_dw */ + u32 key_size_dw; + u32 modulus_size_dw; + u32 exponent_size_dw; + u32 time; +#define CSS_TIME_HOUR (0xFF << 0) +#define CSS_DATE_MIN (0xFF << 8) +#define CSS_DATE_SEC (0xFFFF << 16) + char username[8]; + char buildnumber[12]; + u32 sw_version; +#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16) +#define CSS_SW_VERSION_UC_MINOR (0xFF << 8) +#define CSS_SW_VERSION_UC_PATCH (0xFF << 0) + u32 reserved[14]; + u32 header_info; +} __packed; +static_assert(sizeof(struct uc_css_header) == 128); + +#endif /* _INTEL_UC_FW_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c new file mode 100644 index 000000000000..d8a80388bd31 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2017 Intel Corporation + */ + +#include "i915_selftest.h" +#include "gem/i915_gem_pm.h" + +/* max doorbell number + negative test for each client type */ +#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM) + +static struct intel_guc_client *clients[ATTEMPTS]; + +static bool available_dbs(struct intel_guc *guc, u32 priority) +{ + unsigned long offset; + unsigned long end; + u16 id; + + /* first half is used for normal priority, second half for high */ + offset = 0; + end = GUC_NUM_DOORBELLS / 2; + if (priority <= GUC_CLIENT_PRIORITY_HIGH) { + offset = end; + end += offset; + } + + id = find_next_zero_bit(guc->doorbell_bitmap, end, offset); + if (id < end) + return true; + + return false; +} + +static int check_all_doorbells(struct intel_guc *guc) +{ + u16 db_id; + + pr_info_once("Max number of doorbells: %d", GUC_NUM_DOORBELLS); + for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) { + if (!doorbell_ok(guc, db_id)) { + pr_err("doorbell %d, not ok\n", db_id); + return -EIO; + } + } + + return 0; +} + +static int ring_doorbell_nop(struct intel_guc_client *client) +{ + struct guc_process_desc *desc = __get_process_desc(client); + int err; + + client->use_nop_wqi = true; + + spin_lock_irq(&client->wq_lock); + + guc_wq_item_append(client, 0, 0, 0, 0); + guc_ring_doorbell(client); + + spin_unlock_irq(&client->wq_lock); + + client->use_nop_wqi = false; + + /* if there are no issues GuC will update the WQ head and keep the + * WQ in active status + */ + err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10); + if (err) { + pr_err("doorbell %u ring failed!\n", client->doorbell_id); + return -EIO; + } + + if (desc->wq_status != WQ_STATUS_ACTIVE) { + pr_err("doorbell %u ring put WQ in bad state (%u)!\n", + client->doorbell_id, desc->wq_status); + return -EIO; + } + + return 0; +} + +/* + * Basic client sanity check, handy to validate create_clients. + */ +static int validate_client(struct intel_guc_client *client, int client_priority) +{ + if (client->priority != client_priority || + client->doorbell_id == GUC_DOORBELL_INVALID) + return -EINVAL; + else + return 0; +} + +static bool client_doorbell_in_sync(struct intel_guc_client *client) +{ + return !client || doorbell_ok(client->guc, client->doorbell_id); +} + +/* + * Check that we're able to synchronize guc_clients with their doorbells + * + * We're creating clients and reserving doorbells once, at module load. 
During
+ * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to
+ * GuC being reset. In other words - GuC clients are still around, but the
+ * status of their doorbells may be incorrect. This is the reason behind
+ * validating that the doorbell status expected by the driver matches what the
+ * GuC/HW have.
+ */
+static int igt_guc_clients(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_guc *guc = &gt->uc.guc;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ GEM_BUG_ON(!HAS_GT_UC(gt->i915));
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ err = check_all_doorbells(guc);
+ if (err)
+ goto unlock;
+
+ /*
+ * Get rid of clients created during driver load because the test will
+ * recreate them.
+ */
+ guc_clients_disable(guc);
+ guc_clients_destroy(guc);
+ if (guc->execbuf_client) {
+ pr_err("guc_clients_destroy lied!\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ err = guc_clients_create(guc);
+ if (err) {
+ pr_err("Failed to create clients\n");
+ goto unlock;
+ }
+ GEM_BUG_ON(!guc->execbuf_client);
+
+ err = validate_client(guc->execbuf_client,
+ GUC_CLIENT_PRIORITY_KMD_NORMAL);
+ if (err) {
+ pr_err("execbuf client validation failed\n");
+ goto out;
+ }
+
+ /* the client should now have reserved a doorbell */
+ if (!has_doorbell(guc->execbuf_client)) {
+ pr_err("guc_clients_create didn't reserve doorbells\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Now enable the clients */
+ guc_clients_enable(guc);
+
+ /* each client should now have received a doorbell */
+ if (!client_doorbell_in_sync(guc->execbuf_client)) {
+ pr_err("failed to initialize the doorbells\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Basic test - an attempt to reallocate a valid doorbell to the
+ * client it is currently assigned should not cause a failure.
+ */
+ err = create_doorbell(guc->execbuf_client);
+
+out:
+ /*
+ * Leave clean state for other tests, plus the driver always destroys
+ * the clients during unload.
+ */
+ guc_clients_disable(guc);
+ guc_clients_destroy(guc);
+ guc_clients_create(guc);
+ guc_clients_enable(guc);
+unlock:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ return err;
+}
+
+/*
+ * Create as many clients as there are doorbells. Note that there's already
+ * client(s)/doorbell(s) created during driver load, but this test creates
+ * its own and does not interact with the existing ones.
+ */
+static int igt_guc_doorbells(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_guc *guc = &gt->uc.guc;
+ intel_wakeref_t wakeref;
+ int i, err = 0;
+ u16 db_id;
+
+ GEM_BUG_ON(!HAS_GT_UC(gt->i915));
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ err = check_all_doorbells(guc);
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < ATTEMPTS; i++) {
+ clients[i] = guc_client_alloc(guc, i % GUC_CLIENT_PRIORITY_NUM);
+
+ if (!clients[i]) {
+ pr_err("[%d] No guc client\n", i);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (IS_ERR(clients[i])) {
+ if (PTR_ERR(clients[i]) != -ENOSPC) {
+ pr_err("[%d] unexpected error\n", i);
+ err = PTR_ERR(clients[i]);
+ goto out;
+ }
+
+ if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) {
+ pr_err("[%d] non-db related alloc fail\n", i);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* expected, ran out of dbs for this client type */
+ continue;
+ }
+
+ /*
+ * The check below is only valid because we keep a doorbell
+ * assigned during the whole life of the client.
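+ *
+ * With that guarantee, a stage_id at or above GUC_NUM_DOORBELLS would
+ * mean more clients than doorbells made it through guc_client_alloc(),
+ * which should have failed with -ENOSPC instead.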
+ */ + if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) { + pr_err("[%d] more clients than doorbells (%d >= %d)\n", + i, clients[i]->stage_id, GUC_NUM_DOORBELLS); + err = -EINVAL; + goto out; + } + + err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM); + if (err) { + pr_err("[%d] client_alloc sanity check failed!\n", i); + err = -EINVAL; + goto out; + } + + db_id = clients[i]->doorbell_id; + + err = __guc_client_enable(clients[i]); + if (err) { + pr_err("[%d] Failed to create a doorbell\n", i); + goto out; + } + + /* doorbell id shouldn't change, we are holding the mutex */ + if (db_id != clients[i]->doorbell_id) { + pr_err("[%d] doorbell id changed (%d != %d)\n", + i, db_id, clients[i]->doorbell_id); + err = -EINVAL; + goto out; + } + + err = check_all_doorbells(guc); + if (err) + goto out; + + err = ring_doorbell_nop(clients[i]); + if (err) + goto out; + } + +out: + for (i = 0; i < ATTEMPTS; i++) + if (!IS_ERR_OR_NULL(clients[i])) { + __guc_client_disable(clients[i]); + guc_client_free(clients[i]); + } +unlock: + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + return err; +} + +int intel_guc_live_selftest(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_guc_clients), + SUBTEST(igt_guc_doorbells), + }; + + if (!USES_GUC_SUBMISSION(i915)) + return 0; + + return intel_gt_live_subtests(tests, &i915->gt); +} |
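A brief usage note, hedged since the selftest plumbing itself is outside this diff: intel_guc_live_selftest() only runs once it is registered in the driver's live selftest list and live selftests are enabled, conventionally via an entry of the form

 selftest(guc, intel_guc_live_selftest)

in the i915 live selftest list header, with the subtests then executed at probe time when the module is loaded with the live_selftests parameter turned on.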