Diffstat (limited to 'arch/s390/kvm/interrupt.c')
-rw-r--r--  arch/s390/kvm/interrupt.c | 272
1 file changed, 266 insertions(+), 6 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c30615e605ac..37116a77cb4b 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -21,11 +21,31 @@
 #include "gaccess.h"
 #include "trace-s390.h"
 
+#define IOINT_SCHID_MASK 0x0000ffff
+#define IOINT_SSID_MASK 0x00030000
+#define IOINT_CSSID_MASK 0x03fc0000
+#define IOINT_AI_MASK 0x04000000
+
+static int is_ioint(u64 type)
+{
+	return ((type & 0xfffe0000u) != 0xfffe0000u);
+}
+
 static int psw_extint_disabled(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
 }
 
+static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
+{
+	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
+}
+
+static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
+{
+	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
+}
+
 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 {
 	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
@@ -35,6 +55,13 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static u64 int_word_to_isc_bits(u32 int_word)
+{
+	u8 isc = (int_word & 0x38000000) >> 27;
+
+	return (0x80 >> isc) << 24;
+}
+
 static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 				      struct kvm_s390_interrupt_info *inti)
 {
@@ -67,7 +94,22 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 	case KVM_S390_SIGP_SET_PREFIX:
 	case KVM_S390_RESTART:
 		return 1;
+	case KVM_S390_MCHK:
+		if (psw_mchk_disabled(vcpu))
+			return 0;
+		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
+			return 1;
+		return 0;
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+		if (psw_ioint_disabled(vcpu))
+			return 0;
+		if (vcpu->arch.sie_block->gcr[6] &
+		    int_word_to_isc_bits(inti->io.io_int_word))
+			return 1;
+		return 0;
 	default:
+		printk(KERN_WARNING "illegal interrupt type %llx\n",
+		       inti->type);
 		BUG();
 	}
 	return 0;
@@ -93,6 +135,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
 		&vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
+	vcpu->arch.sie_block->ictl &= ~ICTL_LPSW;
 }
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
@@ -116,6 +159,18 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
 	case KVM_S390_SIGP_STOP:
 		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
 		break;
+	case KVM_S390_MCHK:
+		if (psw_mchk_disabled(vcpu))
+			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
+		else
+			vcpu->arch.sie_block->lctl |= LCTL_CR14;
+		break;
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+		if (psw_ioint_disabled(vcpu))
+			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
+		else
+			vcpu->arch.sie_block->lctl |= LCTL_CR6;
+		break;
 	default:
 		BUG();
 	}
@@ -297,6 +352,73 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 			exception = 1;
 		break;
 
+	case KVM_S390_MCHK:
+		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+			   inti->mchk.mcic);
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+						 inti->mchk.cr14,
+						 inti->mchk.mcic);
+		rc = kvm_s390_vcpu_store_status(vcpu,
+						KVM_S390_STORE_STATUS_PREFIXED);
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic);
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
+				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				     __LC_MCK_NEW_PSW, sizeof(psw_t));
+		if (rc == -EFAULT)
+			exception = 1;
+		break;
+
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+	{
+		__u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
+			inti->io.subchannel_nr;
+		__u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
+			inti->io.io_int_word;
+		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+		vcpu->stat.deliver_io_int++;
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+						 param0, param1);
+		rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID,
+				   inti->io.subchannel_id);
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_NR,
+				   inti->io.subchannel_nr);
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = put_guest_u32(vcpu, __LC_IO_INT_PARM,
+				   inti->io.io_int_parm);
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = put_guest_u32(vcpu, __LC_IO_INT_WORD,
+				   inti->io.io_int_word);
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = copy_to_guest(vcpu, __LC_IO_OLD_PSW,
+				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				     __LC_IO_NEW_PSW, sizeof(psw_t));
+		if (rc == -EFAULT)
+			exception = 1;
+		break;
+	}
 	default:
 		BUG();
 	}
@@ -362,7 +484,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	}
 
 	if ((!rc) && (vcpu->arch.sie_block->ckc <
-		get_clock() + vcpu->arch.sie_block->epoch)) {
+		get_tod_clock() + vcpu->arch.sie_block->epoch)) {
 		if ((!psw_extint_disabled(vcpu)) &&
 			(vcpu->arch.sie_block->gcr[0] & 0x800ul))
 			rc = 1;
@@ -402,13 +524,13 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		goto no_timer;
 	}
 
-	now = get_clock() + vcpu->arch.sie_block->epoch;
+	now = get_tod_clock() + vcpu->arch.sie_block->epoch;
 	if (vcpu->arch.sie_block->ckc < now) {
 		__unset_cpu_idle(vcpu);
 		return 0;
 	}
 
-	sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
+	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
@@ -492,7 +614,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	}
 
 	if ((vcpu->arch.sie_block->ckc <
-		get_clock() + vcpu->arch.sie_block->epoch))
+		get_tod_clock() + vcpu->arch.sie_block->epoch))
 		__try_deliver_ckc_interrupt(vcpu);
 
 	if (atomic_read(&fi->active)) {
@@ -518,6 +640,61 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	}
 }
 
+void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
+	struct kvm_s390_interrupt_info  *n, *inti = NULL;
+	int deliver;
+
+	__reset_intercept_indicators(vcpu);
+	if (atomic_read(&li->active)) {
+		do {
+			deliver = 0;
+			spin_lock_bh(&li->lock);
+			list_for_each_entry_safe(inti, n, &li->list, list) {
+				if ((inti->type == KVM_S390_MCHK) &&
+				    __interrupt_is_deliverable(vcpu, inti)) {
+					list_del(&inti->list);
+					deliver = 1;
+					break;
+				}
+				__set_intercept_indicator(vcpu, inti);
+			}
+			if (list_empty(&li->list))
+				atomic_set(&li->active, 0);
+			spin_unlock_bh(&li->lock);
+			if (deliver) {
+				__do_deliver_interrupt(vcpu, inti);
+				kfree(inti);
+			}
+		} while (deliver);
+	}
+
+	if (atomic_read(&fi->active)) {
+		do {
+			deliver = 0;
+			spin_lock(&fi->lock);
+			list_for_each_entry_safe(inti, n, &fi->list, list) {
+				if ((inti->type == KVM_S390_MCHK) &&
+				    __interrupt_is_deliverable(vcpu, inti)) {
+					list_del(&inti->list);
+					deliver = 1;
+					break;
+				}
+				__set_intercept_indicator(vcpu, inti);
+			}
+			if (list_empty(&fi->list))
+				atomic_set(&fi->active, 0);
+			spin_unlock(&fi->lock);
+			if (deliver) {
+				__do_deliver_interrupt(vcpu, inti);
+				kfree(inti);
+			}
+		} while (deliver);
+	}
+}
+
 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -540,12 +717,50 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 	return 0;
 }
 
+struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
+						    u64 cr6, u64 schid)
+{
+	struct kvm_s390_float_interrupt *fi;
+	struct kvm_s390_interrupt_info *inti, *iter;
+
+	if ((!schid && !cr6) || (schid && cr6))
+		return NULL;
+	mutex_lock(&kvm->lock);
+	fi = &kvm->arch.float_int;
+	spin_lock(&fi->lock);
+	inti = NULL;
+	list_for_each_entry(iter, &fi->list, list) {
+		if (!is_ioint(iter->type))
+			continue;
+		if (cr6 &&
+		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
+			continue;
+		if (schid) {
+			if (((schid & 0x00000000ffff0000) >> 16) !=
+			    iter->io.subchannel_id)
+				continue;
+			if ((schid & 0x000000000000ffff) !=
+			    iter->io.subchannel_nr)
+				continue;
+		}
+		inti = iter;
+		break;
+	}
+	if (inti)
+		list_del_init(&inti->list);
+	if (list_empty(&fi->list))
+		atomic_set(&fi->active, 0);
+	spin_unlock(&fi->lock);
+	mutex_unlock(&kvm->lock);
+	return inti;
+}
+
 int kvm_s390_inject_vm(struct kvm *kvm,
 		       struct kvm_s390_interrupt *s390int)
 {
 	struct kvm_s390_local_interrupt *li;
 	struct kvm_s390_float_interrupt *fi;
-	struct kvm_s390_interrupt_info *inti;
+	struct kvm_s390_interrupt_info *inti, *iter;
 	int sigcpu;
 
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
@@ -569,6 +784,29 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	case KVM_S390_SIGP_STOP:
 	case KVM_S390_INT_EXTERNAL_CALL:
 	case KVM_S390_INT_EMERGENCY:
+		kfree(inti);
+		return -EINVAL;
+	case KVM_S390_MCHK:
+		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
+			 s390int->parm64);
+		inti->type = s390int->type;
+		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
+		inti->mchk.mcic = s390int->parm64;
+		break;
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+		if (s390int->type & IOINT_AI_MASK)
+			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
+		else
+			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
+				 s390int->type & IOINT_CSSID_MASK,
+				 s390int->type & IOINT_SSID_MASK,
+				 s390int->type & IOINT_SCHID_MASK);
+		inti->type = s390int->type;
+		inti->io.subchannel_id = s390int->parm >> 16;
+		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
+		inti->io.io_int_parm = s390int->parm64 >> 32;
+		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
+		break;
 	default:
 		kfree(inti);
 		return -EINVAL;
@@ -579,7 +817,22 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
 	spin_lock(&fi->lock);
-	list_add_tail(&inti->list, &fi->list);
+	if (!is_ioint(inti->type))
+		list_add_tail(&inti->list, &fi->list);
+	else {
+		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
+
+		/* Keep I/O interrupts sorted in isc order. */
+		list_for_each_entry(iter, &fi->list, list) {
+			if (!is_ioint(iter->type))
+				continue;
+			if (int_word_to_isc_bits(iter->io.io_int_word)
+			    <= isc_bits)
+				continue;
+			break;
+		}
+		list_add_tail(&inti->list, &iter->list);
+	}
 	atomic_set(&fi->active, 1);
 	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
 	if (sigcpu == KVM_MAX_VCPUS) {
@@ -651,8 +904,15 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 		inti->type = s390int->type;
 		inti->emerg.code = s390int->parm;
 		break;
+	case KVM_S390_MCHK:
+		VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
+			   s390int->parm64);
+		inti->type = s390int->type;
+		inti->mchk.mcic = s390int->parm64;
+		break;
 	case KVM_S390_INT_VIRTIO:
 	case KVM_S390_INT_SERVICE:
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
 	default:
 		kfree(inti);
 		return -EINVAL;
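Note (not part of the diff above): the I/O-interrupt delivery check added in __interrupt_is_deliverable() boils down to masking guest CR6 with the bit derived from the interruption word's ISC field. The short, self-contained user-space sketch below copies the int_word_to_isc_bits() mapping from the patch so that check can be tried out in isolation; the sample interruption word and CR6 value are invented for demonstration.

/*
 * Illustrative sketch only -- not kernel code. It mirrors the
 * int_word_to_isc_bits() helper introduced by the patch above and shows
 * the CR6 test performed in __interrupt_is_deliverable(). The sample
 * values in main() are made up.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same mapping as the kernel helper: the three ISC bits of the I/O
 * interruption word select one of eight enablement bits, which the kernel
 * then compares against the guest's CR6 (gcr[6]). */
static uint64_t int_word_to_isc_bits(uint32_t int_word)
{
	uint8_t isc = (int_word & 0x38000000) >> 27;

	return (uint64_t)(0x80 >> isc) << 24;
}

int main(void)
{
	uint32_t int_word = 0x18000000;	/* hypothetical interruption word, ISC 3 */
	uint64_t cr6 = 0x10000000;	/* guest enables ISC 3 only */

	printf("isc bits:    0x%" PRIx64 "\n", int_word_to_isc_bits(int_word));
	printf("deliverable: %s\n",
	       (cr6 & int_word_to_isc_bits(int_word)) ? "yes" : "no");
	return 0;
}

Compiled with any C compiler, this prints mask bit 0x10000000 for ISC 3 and reports the interrupt as deliverable; clearing that bit in cr6 flips the result, which is exactly the condition the patch checks before delivery.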