diff options
| author | Rodrigo Vivi <[email protected]> | 2018-07-23 09:13:12 -0700 | 
|---|---|---|
| committer | Rodrigo Vivi <[email protected]> | 2018-07-23 09:13:12 -0700 | 
| commit | c74a7469f97c0f40b46e82ee979f9fb1bb6e847c (patch) | |
| tree | f2690a1a916b73ef94657fbf0e0141ae57701825 /virt/kvm/arm/vgic/vgic-init.c | |
| parent | 6f15a7de86c8cf2dc09fc9e6d07047efa40ef809 (diff) | |
| parent | 500775074f88d9cf5416bed2ca19592812d62c41 (diff) | |
Merge drm/drm-next into drm-intel-next-queued
We need a backmerge to get DP_DPCD_REV_14 before we push other
i915 changes to dinq that could break compilation.
Signed-off-by: Rodrigo Vivi <[email protected]>
Diffstat (limited to 'virt/kvm/arm/vgic/vgic-init.c')
| -rw-r--r-- | virt/kvm/arm/vgic/vgic-init.c | 102 | 
1 file changed, 56 insertions(+), 46 deletions(-)
| diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 68378fe17a0e..2673efce65f3 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -44,7 +44,7 @@   *   * CPU Interface:   * - * - kvm_vgic_vcpu_early_init(): initialization of static data that + * - kvm_vgic_vcpu_init(): initialization of static data that   *   doesn't depend on any sizing information or emulation type. No   *   allocation is allowed there.   */ @@ -67,46 +67,6 @@ void kvm_vgic_early_init(struct kvm *kvm)  	spin_lock_init(&dist->lpi_list_lock);  } -/** - * kvm_vgic_vcpu_early_init() - Initialize static VGIC VCPU data structures - * @vcpu: The VCPU whose VGIC data structures whould be initialized - * - * Only do initialization, but do not actually enable the VGIC CPU interface - * yet. - */ -void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu) -{ -	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; -	int i; - -	INIT_LIST_HEAD(&vgic_cpu->ap_list_head); -	spin_lock_init(&vgic_cpu->ap_list_lock); - -	/* -	 * Enable and configure all SGIs to be edge-triggered and -	 * configure all PPIs as level-triggered. 
-	 */ -	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) { -		struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; - -		INIT_LIST_HEAD(&irq->ap_list); -		spin_lock_init(&irq->irq_lock); -		irq->intid = i; -		irq->vcpu = NULL; -		irq->target_vcpu = vcpu; -		irq->targets = 1U << vcpu->vcpu_id; -		kref_init(&irq->refcount); -		if (vgic_irq_is_sgi(i)) { -			/* SGIs */ -			irq->enabled = 1; -			irq->config = VGIC_CONFIG_EDGE; -		} else { -			/* PPIs */ -			irq->config = VGIC_CONFIG_LEVEL; -		} -	} -} -  /* CREATION */  /** @@ -167,8 +127,11 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)  	kvm->arch.vgic.vgic_model = type;  	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; -	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF; -	kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF; + +	if (type == KVM_DEV_TYPE_ARM_VGIC_V2) +		kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF; +	else +		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);  out_unlock:  	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { @@ -221,13 +184,50 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)  }  /** - * kvm_vgic_vcpu_init() - Register VCPU-specific KVM iodevs + * kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data + * structures and register VCPU-specific KVM iodevs + *   * @vcpu: pointer to the VCPU being created and initialized + * + * Only do initialization, but do not actually enable the + * VGIC CPU interface   */  int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)  { -	int ret = 0; +	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;  	struct vgic_dist *dist = &vcpu->kvm->arch.vgic; +	int ret = 0; +	int i; + +	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF; +	vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF; + +	INIT_LIST_HEAD(&vgic_cpu->ap_list_head); +	spin_lock_init(&vgic_cpu->ap_list_lock); + +	/* +	 * Enable and configure all SGIs to be edge-triggered and +	 * configure all PPIs as level-triggered. 
+	 */ +	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) { +		struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; + +		INIT_LIST_HEAD(&irq->ap_list); +		spin_lock_init(&irq->irq_lock); +		irq->intid = i; +		irq->vcpu = NULL; +		irq->target_vcpu = vcpu; +		irq->targets = 1U << vcpu->vcpu_id; +		kref_init(&irq->refcount); +		if (vgic_irq_is_sgi(i)) { +			/* SGIs */ +			irq->enabled = 1; +			irq->config = VGIC_CONFIG_EDGE; +		} else { +			/* PPIs */ +			irq->config = VGIC_CONFIG_LEVEL; +		} +	}  	if (!irqchip_in_kernel(vcpu->kvm))  		return 0; @@ -303,13 +303,23 @@ out:  static void kvm_vgic_dist_destroy(struct kvm *kvm)  {  	struct vgic_dist *dist = &kvm->arch.vgic; +	struct vgic_redist_region *rdreg, *next;  	dist->ready = false;  	dist->initialized = false;  	kfree(dist->spis); +	dist->spis = NULL;  	dist->nr_spis = 0; +	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { +		list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list) { +			list_del(&rdreg->list); +			kfree(rdreg); +		} +		INIT_LIST_HEAD(&dist->rd_regions); +	} +  	if (vgic_supports_direct_msis(kvm))  		vgic_v4_teardown(kvm);  } @@ -423,7 +433,7 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)  	 * We cannot rely on the vgic maintenance interrupt to be  	 * delivered synchronously. This means we can only use it to  	 * exit the VM, and we perform the handling of EOIed -	 * interrupts on the exit path (see vgic_process_maintenance). +	 * interrupts on the exit path (see vgic_fold_lr_state).  	 */  	return IRQ_HANDLED;  } |