-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_buffer.c  |   2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c     |  12
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.h     |  11
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c     |  37
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h     |  12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c  | 123
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c     |   6
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c            |   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h            |   4
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c       |   1
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c      |   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c      |  33
-rw-r--r--  include/video/samsung_fimd.h              |  10

13 files changed, 174 insertions, 81 deletions
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 7fea74861a87..160ce3c060a5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -439,6 +439,4 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 
 	if (drm_debug & DRM_UT_DRIVER)
 		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
-
-	gpu->lastctx = cmdbuf->ctx;
 }
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 52802e6049e0..96efc84396bf 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -72,14 +72,8 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
 		struct etnaviv_gpu *gpu = priv->gpu[i];
 
-		if (gpu) {
-			mutex_lock(&gpu->lock);
-			if (gpu->lastctx == ctx)
-				gpu->lastctx = NULL;
-			mutex_unlock(&gpu->lock);
-
+		if (gpu)
 			drm_sched_entity_destroy(&ctx->sched_entity[i]);
-		}
 	}
 
 	kfree(ctx);
@@ -523,7 +517,7 @@ static int etnaviv_bind(struct device *dev)
 	if (!priv) {
 		dev_err(dev, "failed to allocate private data\n");
 		ret = -ENOMEM;
-		goto out_unref;
+		goto out_put;
 	}
 	drm->dev_private = priv;
 
@@ -549,7 +543,7 @@ out_register:
 	component_unbind_all(dev, drm);
 out_bind:
 	kfree(priv);
-out_unref:
+out_put:
 	drm_dev_put(drm);
 
 	return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 8d02d1b7dcf5..4bf698de5996 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -107,17 +107,6 @@ static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
 	return base + nelem * elem_size;
 }
 
-/* returns true if fence a comes after fence b */
-static inline bool fence_after(u32 a, u32 b)
-{
-	return (s32)(a - b) > 0;
-}
-
-static inline bool fence_after_eq(u32 a, u32 b)
-{
-	return (s32)(a - b) >= 0;
-}
-
 /*
  * Etnaviv timeouts are specified wrt CLOCK_MONOTONIC, not jiffies.
  * We need to calculate the timeout in terms of number of jiffies
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index f225fbc6edd2..6904535475de 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -3,10 +3,12 @@
  * Copyright (C) 2015-2018 Etnaviv Project
  */
 
+#include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
 #include <linux/thermal.h>
 
 #include "etnaviv_cmdbuf.h"
@@ -976,7 +978,6 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 
 void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
 {
-	unsigned long flags;
 	unsigned int i = 0;
 
 	dev_err(gpu->dev, "recover hung GPU!\n");
@@ -989,15 +990,13 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
 	etnaviv_hw_reset(gpu);
 
 	/* complete all events, the GPU won't do it after the reset */
-	spin_lock_irqsave(&gpu->event_spinlock, flags);
+	spin_lock(&gpu->event_spinlock);
 	for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
 		complete(&gpu->event_free);
 	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
-	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
-	gpu->completed_fence = gpu->active_fence;
+	spin_unlock(&gpu->event_spinlock);
 
 	etnaviv_gpu_hw_init(gpu);
-	gpu->lastctx = NULL;
 	gpu->exec_state = -1;
 
 	mutex_unlock(&gpu->lock);
@@ -1032,7 +1031,7 @@ static bool etnaviv_fence_signaled(struct dma_fence *fence)
 {
 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
-	return fence_completed(f->gpu, f->base.seqno);
+	return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
 }
 
 static void etnaviv_fence_release(struct dma_fence *fence)
@@ -1071,6 +1070,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 	return &f->base;
 }
 
+/* returns true if fence a comes after fence b */
+static inline bool fence_after(u32 a, u32 b)
+{
+	return (s32)(a - b) > 0;
+}
+
 /*
  * event management:
  */
@@ -1078,7 +1083,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
 	unsigned int *events)
 {
-	unsigned long flags, timeout = msecs_to_jiffies(10 * 10000);
+	unsigned long timeout = msecs_to_jiffies(10 * 10000);
 	unsigned i, acquired = 0;
 
 	for (i = 0; i < nr_events; i++) {
@@ -1095,7 +1100,7 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
 		timeout = ret;
 	}
 
-	spin_lock_irqsave(&gpu->event_spinlock, flags);
+	spin_lock(&gpu->event_spinlock);
 
 	for (i = 0; i < nr_events; i++) {
 		int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
@@ -1105,7 +1110,7 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
 		set_bit(event, gpu->event_bitmap);
 	}
 
-	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
+	spin_unlock(&gpu->event_spinlock);
 
 	return 0;
@@ -1118,18 +1123,11 @@ out:
 
 static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&gpu->event_spinlock, flags);
-
 	if (!test_bit(event, gpu->event_bitmap)) {
 		dev_warn(gpu->dev, "event %u is already marked as free", event);
-		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
 	} else {
 		clear_bit(event, gpu->event_bitmap);
-		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
-
 		complete(&gpu->event_free);
 	}
 }
@@ -1306,8 +1304,6 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
 		goto out_unlock;
 	}
 
-	gpu->active_fence = gpu_fence->seqno;
-
 	if (submit->nr_pmrs) {
 		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
 		kref_get(&submit->refcount);
@@ -1549,7 +1545,6 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 	etnaviv_gpu_update_clock(gpu);
 	etnaviv_gpu_hw_init(gpu);
 
-	gpu->lastctx = NULL;
 	gpu->exec_state = -1;
 
 	mutex_unlock(&gpu->lock);
@@ -1806,8 +1801,8 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
 	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
 	u32 idle, mask;
 
-	/* If we have outstanding fences, we're not idle */
-	if (gpu->completed_fence != gpu->active_fence)
+	/* If there are any jobs in the HW queue, we're not idle */
+	if (atomic_read(&gpu->sched.hw_rq_count))
 		return -EBUSY;
 
 	/* Check whether the hardware (except FE) is idle */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 9a75a6937268..9bcf151f706b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -6,9 +6,6 @@
 #ifndef __ETNAVIV_GPU_H__
 #define __ETNAVIV_GPU_H__
 
-#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
-
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 
@@ -88,6 +85,8 @@ struct etnaviv_event {
 
 struct etnaviv_cmdbuf_suballoc;
 struct etnaviv_cmdbuf;
+struct regulator;
+struct clk;
 
 #define ETNA_NR_EVENTS 30
 
@@ -98,7 +97,6 @@ struct etnaviv_gpu {
 	struct mutex lock;
 	struct etnaviv_chip_identity identity;
 	enum etnaviv_sec_mode sec_mode;
-	struct etnaviv_file_private *lastctx;
 	struct workqueue_struct *wq;
 	struct drm_gpu_scheduler sched;
 
@@ -121,7 +119,6 @@ struct etnaviv_gpu {
 	struct mutex fence_lock;
 	struct idr fence_idr;
 	u32 next_fence;
-	u32 active_fence;
 	u32 completed_fence;
 	wait_queue_head_t fence_event;
 	u64 fence_context;
@@ -161,11 +158,6 @@ static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
 	return readl(gpu->mmio + reg);
 }
 
-static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
-{
-	return fence_after_eq(gpu->completed_fence, fence);
-}
-
 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
 
 int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index e3d6a8584715..786a8ee6f10f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -228,6 +228,21 @@ static const uint32_t fimd_formats[] = {
 	DRM_FORMAT_ARGB8888,
 };
 
+static const unsigned int capabilities[WINDOWS_NR] = {
+	0,
+	EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+	EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+	EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+	EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+};
+
+static inline void fimd_set_bits(struct fimd_context *ctx, u32 reg, u32 mask,
+				 u32 val)
+{
+	val = (val & mask) | (readl(ctx->regs + reg) & ~mask);
+	writel(val, ctx->regs + reg);
+}
+
 static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
 {
 	struct fimd_context *ctx = crtc->ctx;
@@ -551,13 +566,88 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
 	writel(val, ctx->regs + VIDCON0);
 }
 
+static void fimd_win_set_bldeq(struct fimd_context *ctx, unsigned int win,
+			       unsigned int alpha, unsigned int pixel_alpha)
+{
+	u32 mask = BLENDEQ_A_FUNC_F(0xf) | BLENDEQ_B_FUNC_F(0xf);
+	u32 val = 0;
+
+	switch (pixel_alpha) {
+	case DRM_MODE_BLEND_PIXEL_NONE:
+	case DRM_MODE_BLEND_COVERAGE:
+		val |= BLENDEQ_A_FUNC_F(BLENDEQ_ALPHA_A);
+		val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
+		break;
+	case DRM_MODE_BLEND_PREMULTI:
+	default:
+		if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+			val |= BLENDEQ_A_FUNC_F(BLENDEQ_ALPHA0);
+			val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
+		} else {
+			val |= BLENDEQ_A_FUNC_F(BLENDEQ_ONE);
+			val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
+		}
+		break;
+	}
+	fimd_set_bits(ctx, BLENDEQx(win), mask, val);
+}
 
-static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
-				uint32_t pixel_format, int width)
+static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win,
+				unsigned int alpha, unsigned int pixel_alpha)
 {
-	unsigned long val;
+	u32 win_alpha_l = (alpha >> 8) & 0xf;
+	u32 win_alpha_h = alpha >> 12;
+	u32 val = 0;
 
-	val = WINCONx_ENWIN;
+	switch (pixel_alpha) {
+	case DRM_MODE_BLEND_PIXEL_NONE:
+		break;
+	case DRM_MODE_BLEND_COVERAGE:
+	case DRM_MODE_BLEND_PREMULTI:
+	default:
+		val |= WINCON1_ALPHA_SEL;
+		val |= WINCON1_BLD_PIX;
+		val |= WINCON1_ALPHA_MUL;
+		break;
+	}
+	fimd_set_bits(ctx, WINCON(win), WINCONx_BLEND_MODE_MASK, val);
+
+	/* OSD alpha */
+	val = VIDISD14C_ALPHA0_R(win_alpha_h) |
+		VIDISD14C_ALPHA0_G(win_alpha_h) |
+		VIDISD14C_ALPHA0_B(win_alpha_h) |
+		VIDISD14C_ALPHA1_R(0x0) |
+		VIDISD14C_ALPHA1_G(0x0) |
+		VIDISD14C_ALPHA1_B(0x0);
+	writel(val, ctx->regs + VIDOSD_C(win));
+
+	val = VIDW_ALPHA_R(win_alpha_l) | VIDW_ALPHA_G(win_alpha_l) |
+		VIDW_ALPHA_B(win_alpha_l);
+	writel(val, ctx->regs + VIDWnALPHA0(win));
+
+	val = VIDW_ALPHA_R(0x0) | VIDW_ALPHA_G(0x0) |
+		VIDW_ALPHA_B(0x0);
+	writel(val, ctx->regs + VIDWnALPHA1(win));
+
+	fimd_set_bits(ctx, BLENDCON, BLENDCON_NEW_MASK,
+			BLENDCON_NEW_8BIT_ALPHA_VALUE);
+}
+
+static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
+				struct drm_framebuffer *fb, int width)
+{
+	struct exynos_drm_plane plane = ctx->planes[win];
+	struct exynos_drm_plane_state *state =
+		to_exynos_plane_state(plane.base.state);
+	uint32_t pixel_format = fb->format->format;
+	unsigned int alpha = state->base.alpha;
+	u32 val = WINCONx_ENWIN;
+	unsigned int pixel_alpha;
+
+	if (fb->format->has_alpha)
+		pixel_alpha = state->base.pixel_blend_mode;
+	else
+		pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
 
 	/*
 	 * In case of s3c64xx, window 0 doesn't support alpha channel.
@@ -591,8 +681,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
 		break;
 	case DRM_FORMAT_ARGB8888:
 	default:
-		val |= WINCON1_BPPMODE_25BPP_A1888
-			| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
+		val |= WINCON1_BPPMODE_25BPP_A1888;
 		val |= WINCONx_WSWP;
 		val |= WINCONx_BURSTLEN_16WORD;
 		break;
@@ -610,25 +699,12 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
 		val &= ~WINCONx_BURSTLEN_MASK;
 		val |= WINCONx_BURSTLEN_4WORD;
 	}
-
-	writel(val, ctx->regs + WINCON(win));
+	fimd_set_bits(ctx, WINCON(win), ~WINCONx_BLEND_MODE_MASK, val);
 
 	/* hardware window 0 doesn't support alpha channel. */
 	if (win != 0) {
-		/* OSD alpha */
-		val = VIDISD14C_ALPHA0_R(0xf) |
-			VIDISD14C_ALPHA0_G(0xf) |
-			VIDISD14C_ALPHA0_B(0xf) |
-			VIDISD14C_ALPHA1_R(0xf) |
-			VIDISD14C_ALPHA1_G(0xf) |
-			VIDISD14C_ALPHA1_B(0xf);
-
-		writel(val, ctx->regs + VIDOSD_C(win));
-
-		val = VIDW_ALPHA_R(0xf) | VIDW_ALPHA_G(0xf) |
-			VIDW_ALPHA_G(0xf);
-		writel(val, ctx->regs + VIDWnALPHA0(win));
-		writel(val, ctx->regs + VIDWnALPHA1(win));
+		fimd_win_set_bldmod(ctx, win, alpha, pixel_alpha);
+		fimd_win_set_bldeq(ctx, win, alpha, pixel_alpha);
 	}
 }
@@ -785,7 +861,7 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
 		DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
 	}
 
-	fimd_win_set_pixfmt(ctx, win, fb->format->format, state->src.w);
+	fimd_win_set_pixfmt(ctx, win, fb, state->src.w);
 
 	/* hardware window 0 doesn't support color key. */
 	if (win != 0)
@@ -987,6 +1063,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
 		ctx->configs[i].num_pixel_formats = ARRAY_SIZE(fimd_formats);
 		ctx->configs[i].zpos = i;
 		ctx->configs[i].type = fimd_win_types[i];
+		ctx->configs[i].capabilities = capabilities[i];
 		ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
 					&ctx->configs[i]);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 77edbfcb0f75..77ae634eb11c 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1900,11 +1900,11 @@ static struct cmd_info cmd_info[] = {
 	{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
 
-	{"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
+	{"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
 		D_BDW_PLUS, 0, 8, NULL},
 
-	{"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
-		ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL,
+		D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
 
 	{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
 		ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 6ef5a7fc70df..733a2a0d0c30 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -437,7 +437,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
 	ret = intel_gvt_debugfs_init(gvt);
 	if (ret)
-		gvt_err("debugfs registeration failed, go on.\n");
+		gvt_err("debugfs registration failed, go on.\n");
 
 	gvt_dbg_core("gvt device initialization is done\n");
 	dev_priv->gvt = gvt;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 31f6cdbe5c42..b4ab1dad0143 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -159,6 +159,10 @@ struct intel_vgpu_submission {
 	struct kmem_cache *workloads;
 	atomic_t running_workload_num;
 	struct i915_gem_context *shadow_ctx;
+	union {
+		u64 i915_context_pml4;
+		u64 i915_context_pdps[GEN8_3LVL_PDPES];
+	};
 	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	void *ring_scan_buffer[I915_NUM_ENGINES];
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index aa280bb07125..b5475c91e2ef 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -475,6 +475,7 @@ static i915_reg_t force_nonpriv_white_list[] = {
 	_MMIO(0x7704),
 	_MMIO(0x7708),
 	_MMIO(0x770c),
+	_MMIO(0x83a8),
 	_MMIO(0xb110),
 	GEN8_L3SQCREG4,//_MMIO(0xb118)
 	_MMIO(0xe100),
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 5daa23ae566b..6b9d1354ff29 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -126,7 +126,7 @@ static const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
 	[FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
 	[AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
 	[AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
-	[ERR_AND_DBG] = "South Error and Debug Interupts Combined",
+	[ERR_AND_DBG] = "South Error and Debug Interrupts Combined",
 	[GMBUS] = "Gmbus",
 	[SDVO_B_HOTPLUG] = "SDVO B hotplug",
 	[CRT_HOTPLUG] = "CRT Hotplug",
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b8fbe3fabea3..1ad8c5e1455d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1079,6 +1079,21 @@ err:
 	return ret;
 }
 
+static void
+i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
+{
+	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
+	int i;
+
+	if (i915_vm_is_48bit(&i915_ppgtt->vm))
+		px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
+	else {
+		for (i = 0; i < GEN8_3LVL_PDPES; i++)
+			px_dma(i915_ppgtt->pdp.page_directory[i]) =
+				s->i915_context_pdps[i];
+	}
+}
+
 /**
  * intel_vgpu_clean_submission - free submission-related resource for vGPU
  * @vgpu: a vGPU
@@ -1091,6 +1106,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 	struct intel_vgpu_submission *s = &vgpu->submission;
 
 	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
+	i915_context_ppgtt_root_restore(s);
 	i915_gem_context_put(s->shadow_ctx);
 	kmem_cache_destroy(s->workloads);
 }
@@ -1116,6 +1132,21 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
 	s->ops->reset(vgpu, engine_mask);
 }
 
+static void
+i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
+{
+	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
+	int i;
+
+	if (i915_vm_is_48bit(&i915_ppgtt->vm))
+		s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
+	else {
+		for (i = 0; i < GEN8_3LVL_PDPES; i++)
+			s->i915_context_pdps[i] =
+				px_dma(i915_ppgtt->pdp.page_directory[i]);
+	}
+}
+
 /**
  * intel_vgpu_setup_submission - setup submission-related resource for vGPU
  * @vgpu: a vGPU
@@ -1138,6 +1169,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	if (IS_ERR(s->shadow_ctx))
 		return PTR_ERR(s->shadow_ctx);
 
+	i915_context_ppgtt_root_save(s);
+
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
 	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index d8fc96ed11e9..4ba5efe8d086 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -198,6 +198,7 @@
 #define WINCONx_BURSTLEN_8WORD		(0x1 << 9)
 #define WINCONx_BURSTLEN_4WORD		(0x2 << 9)
 #define WINCONx_ENWIN			(1 << 0)
+#define WINCONx_BLEND_MODE_MASK		(0xc2)
 
 #define WINCON0_BPPMODE_MASK		(0xf << 2)
 #define WINCON0_BPPMODE_SHIFT		2
@@ -211,6 +212,7 @@
 #define WINCON0_BPPMODE_24BPP_888	(0xb << 2)
 
 #define WINCON1_LOCALSEL_CAMIF		(1 << 23)
+#define WINCON1_ALPHA_MUL		(1 << 7)
 #define WINCON1_BLD_PIX			(1 << 6)
 #define WINCON1_BPPMODE_MASK		(0xf << 2)
 #define WINCON1_BPPMODE_SHIFT		2
@@ -437,6 +439,14 @@
 #define WPALCON_W0PAL_16BPP_565		(0x6 << 0)
 
 /* Blending equation control */
+#define BLENDEQx(_win)			(0x244 + ((_win - 1) * 4))
+#define BLENDEQ_ZERO			0x0
+#define BLENDEQ_ONE			0x1
+#define BLENDEQ_ALPHA_A			0x2
+#define BLENDEQ_ONE_MINUS_ALPHA_A	0x3
+#define BLENDEQ_ALPHA0			0x6
+#define BLENDEQ_B_FUNC_F(_x)		(_x << 6)
+#define BLENDEQ_A_FUNC_F(_x)		(_x << 0)
 #define BLENDCON			0x260
 #define BLENDCON_NEW_MASK		(1 << 0)
 #define BLENDCON_NEW_8BIT_ALPHA_VALUE	(1 << 0)
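Note on the etnaviv hunks above: fence_after() compares two u32 fence sequence numbers through a signed cast, so the ordering stays correct even when the 32-bit counter wraps. The snippet below is a minimal, self-contained userspace sketch of that idiom (test values are illustrative only; it is not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Same wrap-safe comparison idiom as etnaviv's fence_after(): the
 * unsigned difference is reinterpreted as signed, so "a is newer
 * than b" remains true even after the counter wraps around. */
static inline int fence_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	assert(fence_after(2, 1));                    /* ordinary ordering */
	assert(!fence_after(5, 5));                   /* equal is not "after" */
	assert(fence_after(0x00000002, 0xfffffffeu)); /* still holds across the wrap */
	return 0;
}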