diff options
| author | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 | 
|---|---|---|
| committer | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 | 
| commit | 1ac731c529cd4d6adbce134754b51ff7d822b145 (patch) | |
| tree | 143ab3f35ca5f3b69f583c84e6964b17139c2ec1 /drivers/pci/controller/pci-hyperv.c | |
| parent | 07b4c950f27bef0362dc6ad7ee713aab61d58149 (diff) | |
| parent | 54116d442e001e1b6bd482122043b1870998a1f3 (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.6 merge window.
Diffstat (limited to 'drivers/pci/controller/pci-hyperv.c')
| -rw-r--r-- | drivers/pci/controller/pci-hyperv.c | 419 | 
1 file changed, 257 insertions, 162 deletions
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index f33370b75628..2d93d0c4f10d 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -489,7 +489,10 @@ struct hv_pcibus_device {  	struct fwnode_handle *fwnode;  	/* Protocol version negotiated with the host */  	enum pci_protocol_version_t protocol_version; + +	struct mutex state_lock;  	enum hv_pcibus_state state; +  	struct hv_device *hdev;  	resource_size_t low_mmio_space;  	resource_size_t high_mmio_space; @@ -508,19 +511,11 @@ struct hv_pcibus_device {  	struct msi_domain_info msi_info;  	struct irq_domain *irq_domain; -	spinlock_t retarget_msi_interrupt_lock; -  	struct workqueue_struct *wq;  	/* Highest slot of child device with resources allocated */  	int wslot_res_allocated; - -	/* hypercall arg, must not cross page boundary */ -	struct hv_retarget_device_interrupt retarget_msi_interrupt_params; - -	/* -	 * Don't put anything here: retarget_msi_interrupt_params must be last -	 */ +	bool use_calls; /* Use hypercalls to access mmio cfg space */  };  /* @@ -553,19 +548,10 @@ struct hv_dr_state {  	struct hv_pcidev_description func[];  }; -enum hv_pcichild_state { -	hv_pcichild_init = 0, -	hv_pcichild_requirements, -	hv_pcichild_resourced, -	hv_pcichild_ejecting, -	hv_pcichild_maximum -}; -  struct hv_pci_dev {  	/* List protected by pci_rescan_remove_lock */  	struct list_head list_entry;  	refcount_t refs; -	enum hv_pcichild_state state;  	struct pci_slot *pci_slot;  	struct hv_pcidev_description desc;  	bool reported_missing; @@ -643,10 +629,15 @@ static void hv_arch_irq_unmask(struct irq_data *data)  	pbus = pdev->bus;  	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);  	int_desc = data->chip_data; +	if (!int_desc) { +		dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n", +			 __func__, data->irq); +		return; +	} -	spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags); +	
local_irq_save(flags); -	params = &hbus->retarget_msi_interrupt_params; +	params = *this_cpu_ptr(hyperv_pcpu_input_arg);  	memset(params, 0, sizeof(*params));  	params->partition_id = HV_PARTITION_ID_SELF;  	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI; @@ -679,7 +670,7 @@ static void hv_arch_irq_unmask(struct irq_data *data)  		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {  			res = 1; -			goto exit_unlock; +			goto out;  		}  		cpumask_and(tmp, dest, cpu_online_mask); @@ -688,7 +679,7 @@ static void hv_arch_irq_unmask(struct irq_data *data)  		if (nr_bank <= 0) {  			res = 1; -			goto exit_unlock; +			goto out;  		}  		/* @@ -707,8 +698,8 @@ static void hv_arch_irq_unmask(struct irq_data *data)  	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),  			      params, NULL); -exit_unlock: -	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags); +out: +	local_irq_restore(flags);  	/*  	 * During hibernation, when a CPU is offlined, the kernel tries @@ -1041,6 +1032,70 @@ static int wslot_to_devfn(u32 wslot)  	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);  } +static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val) +{ +	struct hv_mmio_read_input *in; +	struct hv_mmio_read_output *out; +	u64 ret; + +	/* +	 * Must be called with interrupts disabled so it is safe +	 * to use the per-cpu input argument page.  Use it for +	 * both input and output. 
+	 */ +	in = *this_cpu_ptr(hyperv_pcpu_input_arg); +	out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in); +	in->gpa = gpa; +	in->size = size; + +	ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out); +	if (hv_result_success(ret)) { +		switch (size) { +		case 1: +			*val = *(u8 *)(out->data); +			break; +		case 2: +			*val = *(u16 *)(out->data); +			break; +		default: +			*val = *(u32 *)(out->data); +			break; +		} +	} else +		dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n", +				ret, gpa, size); +} + +static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val) +{ +	struct hv_mmio_write_input *in; +	u64 ret; + +	/* +	 * Must be called with interrupts disabled so it is safe +	 * to use the per-cpu input argument memory. +	 */ +	in = *this_cpu_ptr(hyperv_pcpu_input_arg); +	in->gpa = gpa; +	in->size = size; +	switch (size) { +	case 1: +		*(u8 *)(in->data) = val; +		break; +	case 2: +		*(u16 *)(in->data) = val; +		break; +	default: +		*(u32 *)(in->data) = val; +		break; +	} + +	ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL); +	if (!hv_result_success(ret)) +		dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n", +				ret, gpa, size); +} +  /*   * PCI Configuration Space for these root PCI buses is implemented as a pair   * of pages in memory-mapped I/O space.  Writing to the first page chooses @@ -1059,8 +1114,10 @@ static int wslot_to_devfn(u32 wslot)  static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,  				     int size, u32 *val)  { +	struct hv_pcibus_device *hbus = hpdev->hbus; +	struct device *dev = &hbus->hdev->device; +	int offset = where + CFG_PAGE_OFFSET;  	unsigned long flags; -	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;  	/*  	 * If the attempt is to read the IDs or the ROM BAR, simulate that. 
@@ -1088,56 +1145,79 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,  		 */  		*val = 0;  	} else if (where + size <= CFG_PAGE_SIZE) { -		spin_lock_irqsave(&hpdev->hbus->config_lock, flags); -		/* Choose the function to be read. (See comment above) */ -		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); -		/* Make sure the function was chosen before we start reading. */ -		mb(); -		/* Read from that function's config space. */ -		switch (size) { -		case 1: -			*val = readb(addr); -			break; -		case 2: -			*val = readw(addr); -			break; -		default: -			*val = readl(addr); -			break; + +		spin_lock_irqsave(&hbus->config_lock, flags); +		if (hbus->use_calls) { +			phys_addr_t addr = hbus->mem_config->start + offset; + +			hv_pci_write_mmio(dev, hbus->mem_config->start, 4, +						hpdev->desc.win_slot.slot); +			hv_pci_read_mmio(dev, addr, size, val); +		} else { +			void __iomem *addr = hbus->cfg_addr + offset; + +			/* Choose the function to be read. (See comment above) */ +			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr); +			/* Make sure the function was chosen before reading. */ +			mb(); +			/* Read from that function's config space. */ +			switch (size) { +			case 1: +				*val = readb(addr); +				break; +			case 2: +				*val = readw(addr); +				break; +			default: +				*val = readl(addr); +				break; +			} +			/* +			 * Make sure the read was done before we release the +			 * spinlock allowing consecutive reads/writes. +			 */ +			mb();  		} -		/* -		 * Make sure the read was done before we release the spinlock -		 * allowing consecutive reads/writes. 
-		 */ -		mb(); -		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); +		spin_unlock_irqrestore(&hbus->config_lock, flags);  	} else { -		dev_err(&hpdev->hbus->hdev->device, -			"Attempt to read beyond a function's config space.\n"); +		dev_err(dev, "Attempt to read beyond a function's config space.\n");  	}  }  static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)  { +	struct hv_pcibus_device *hbus = hpdev->hbus; +	struct device *dev = &hbus->hdev->device; +	u32 val;  	u16 ret;  	unsigned long flags; -	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + -			     PCI_VENDOR_ID; -	spin_lock_irqsave(&hpdev->hbus->config_lock, flags); +	spin_lock_irqsave(&hbus->config_lock, flags); -	/* Choose the function to be read. (See comment above) */ -	writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); -	/* Make sure the function was chosen before we start reading. */ -	mb(); -	/* Read from that function's config space. */ -	ret = readw(addr); -	/* -	 * mb() is not required here, because the spin_unlock_irqrestore() -	 * is a barrier. -	 */ +	if (hbus->use_calls) { +		phys_addr_t addr = hbus->mem_config->start + +					 CFG_PAGE_OFFSET + PCI_VENDOR_ID; + +		hv_pci_write_mmio(dev, hbus->mem_config->start, 4, +					hpdev->desc.win_slot.slot); +		hv_pci_read_mmio(dev, addr, 2, &val); +		ret = val;  /* Truncates to 16 bits */ +	} else { +		void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET + +					     PCI_VENDOR_ID; +		/* Choose the function to be read. (See comment above) */ +		writel(hpdev->desc.win_slot.slot, hbus->cfg_addr); +		/* Make sure the function was chosen before we start reading. */ +		mb(); +		/* Read from that function's config space. */ +		ret = readw(addr); +		/* +		 * mb() is not required here, because the +		 * spin_unlock_irqrestore() is a barrier. 
+		 */ +	} -	spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); +	spin_unlock_irqrestore(&hbus->config_lock, flags);  	return ret;  } @@ -1152,39 +1232,51 @@ static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)  static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,  				      int size, u32 val)  { +	struct hv_pcibus_device *hbus = hpdev->hbus; +	struct device *dev = &hbus->hdev->device; +	int offset = where + CFG_PAGE_OFFSET;  	unsigned long flags; -	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;  	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&  	    where + size <= PCI_CAPABILITY_LIST) {  		/* SSIDs and ROM BARs are read-only */  	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) { -		spin_lock_irqsave(&hpdev->hbus->config_lock, flags); -		/* Choose the function to be written. (See comment above) */ -		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); -		/* Make sure the function was chosen before we start writing. */ -		wmb(); -		/* Write to that function's config space. */ -		switch (size) { -		case 1: -			writeb(val, addr); -			break; -		case 2: -			writew(val, addr); -			break; -		default: -			writel(val, addr); -			break; +		spin_lock_irqsave(&hbus->config_lock, flags); + +		if (hbus->use_calls) { +			phys_addr_t addr = hbus->mem_config->start + offset; + +			hv_pci_write_mmio(dev, hbus->mem_config->start, 4, +						hpdev->desc.win_slot.slot); +			hv_pci_write_mmio(dev, addr, size, val); +		} else { +			void __iomem *addr = hbus->cfg_addr + offset; + +			/* Choose the function to write. (See comment above) */ +			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr); +			/* Make sure the function was chosen before writing. */ +			wmb(); +			/* Write to that function's config space. 
*/ +			switch (size) { +			case 1: +				writeb(val, addr); +				break; +			case 2: +				writew(val, addr); +				break; +			default: +				writel(val, addr); +				break; +			} +			/* +			 * Make sure the write was done before we release the +			 * spinlock allowing consecutive reads/writes. +			 */ +			mb();  		} -		/* -		 * Make sure the write was done before we release the spinlock -		 * allowing consecutive reads/writes. -		 */ -		mb(); -		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); +		spin_unlock_irqrestore(&hbus->config_lock, flags);  	} else { -		dev_err(&hpdev->hbus->hdev->device, -			"Attempt to write beyond a function's config space.\n"); +		dev_err(dev, "Attempt to write beyond a function's config space.\n");  	}  } @@ -1911,12 +2003,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)  		hv_pci_onchannelcallback(hbus);  		spin_unlock_irqrestore(&channel->sched_lock, flags); -		if (hpdev->state == hv_pcichild_ejecting) { -			dev_err_once(&hbus->hdev->device, -				     "the device is being ejected\n"); -			goto enable_tasklet; -		} -  		udelay(100);  	} @@ -2522,6 +2608,8 @@ static void pci_devices_present_work(struct work_struct *work)  	if (!dr)  		return; +	mutex_lock(&hbus->state_lock); +  	/* First, mark all existing children as reported missing. 
*/  	spin_lock_irqsave(&hbus->device_list_lock, flags);  	list_for_each_entry(hpdev, &hbus->children, list_entry) { @@ -2603,6 +2691,8 @@ static void pci_devices_present_work(struct work_struct *work)  		break;  	} +	mutex_unlock(&hbus->state_lock); +  	kfree(dr);  } @@ -2751,7 +2841,7 @@ static void hv_eject_device_work(struct work_struct *work)  	hpdev = container_of(work, struct hv_pci_dev, wrk);  	hbus = hpdev->hbus; -	WARN_ON(hpdev->state != hv_pcichild_ejecting); +	mutex_lock(&hbus->state_lock);  	/*  	 * Ejection can come before or after the PCI bus has been set up, so @@ -2789,6 +2879,8 @@ static void hv_eject_device_work(struct work_struct *work)  	put_pcichild(hpdev);  	put_pcichild(hpdev);  	/* hpdev has been freed. Do not use it any more. */ + +	mutex_unlock(&hbus->state_lock);  }  /** @@ -2809,7 +2901,6 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev)  		return;  	} -	hpdev->state = hv_pcichild_ejecting;  	get_pcichild(hpdev);  	INIT_WORK(&hpdev->wrk, hv_eject_device_work);  	queue_work(hbus->wq, &hpdev->wrk); @@ -3238,8 +3329,10 @@ static int hv_pci_enter_d0(struct hv_device *hdev)  	struct pci_bus_d0_entry *d0_entry;  	struct hv_pci_compl comp_pkt;  	struct pci_packet *pkt; +	bool retry = true;  	int ret; +enter_d0_retry:  	/*  	 * Tell the host that the bus is ready to use, and moved into the  	 * powered-on state.  This includes telling the host which region @@ -3266,6 +3359,38 @@ static int hv_pci_enter_d0(struct hv_device *hdev)  	if (ret)  		goto exit; +	/* +	 * In certain case (Kdump) the pci device of interest was +	 * not cleanly shut down and resource is still held on host +	 * side, the host could return invalid device status. +	 * We need to explicitly request host to release the resource +	 * and try to enter D0 again. 
+	 */ +	if (comp_pkt.completion_status < 0 && retry) { +		retry = false; + +		dev_err(&hdev->device, "Retrying D0 Entry\n"); + +		/* +		 * Hv_pci_bus_exit() calls hv_send_resources_released() +		 * to free up resources of its child devices. +		 * In the kdump kernel we need to set the +		 * wslot_res_allocated to 255 so it scans all child +		 * devices to release resources allocated in the +		 * normal kernel before panic happened. +		 */ +		hbus->wslot_res_allocated = 255; + +		ret = hv_pci_bus_exit(hdev, true); + +		if (ret == 0) { +			kfree(pkt); +			goto enter_d0_retry; +		} +		dev_err(&hdev->device, +			"Retrying D0 failed with ret %d\n", ret); +	} +  	if (comp_pkt.completion_status < 0) {  		dev_err(&hdev->device,  			"PCI Pass-through VSP failed D0 Entry with status %x\n", @@ -3308,6 +3433,24 @@ static int hv_pci_query_relations(struct hv_device *hdev)  	if (!ret)  		ret = wait_for_response(hdev, &comp); +	/* +	 * In the case of fast device addition/removal, it's possible that +	 * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we +	 * already got a PCI_BUS_RELATIONS* message from the host and the +	 * channel callback already scheduled a work to hbus->wq, which can be +	 * running pci_devices_present_work() -> survey_child_resources() -> +	 * complete(&hbus->survey_event), even after hv_pci_query_relations() +	 * exits and the stack variable 'comp' is no longer valid; as a result, +	 * a hang or a page fault may happen when the complete() calls +	 * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from +	 * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is +	 * -ENODEV, there can't be any more work item scheduled to hbus->wq +	 * after the flush_workqueue(): see vmbus_onoffer_rescind() -> +	 * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() -> +	 * channel->rescind = true. 
+	 */ +	flush_workqueue(hbus->wq); +  	return ret;  } @@ -3493,42 +3636,18 @@ static int hv_pci_probe(struct hv_device *hdev,  	struct hv_pcibus_device *hbus;  	u16 dom_req, dom;  	char *name; -	bool enter_d0_retry = true;  	int ret; -	/* -	 * hv_pcibus_device contains the hypercall arguments for retargeting in -	 * hv_irq_unmask(). Those must not cross a page boundary. -	 */ -	BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE); -  	bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);  	if (!bridge)  		return -ENOMEM; -	/* -	 * With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural -	 * alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate -	 * a 4KB buffer that is guaranteed to be 4KB-aligned. Here the size and -	 * alignment of hbus is important because hbus's field -	 * retarget_msi_interrupt_params must not cross a 4KB page boundary. -	 * -	 * Here we prefer kzalloc to get_zeroed_page(), because a buffer -	 * allocated by the latter is not tracked and scanned by kmemleak, and -	 * hence kmemleak reports the pointer contained in the hbus buffer -	 * (i.e. the hpdev struct, which is created in new_pcichild_device() and -	 * is tracked by hbus->children) as memory leak (false positive). -	 * -	 * If the kernel doesn't have 59bb47985c1d, get_zeroed_page() *must* be -	 * used to allocate the hbus buffer and we can avoid the kmemleak false -	 * positive by using kmemleak_alloc() and kmemleak_free() to ask -	 * kmemleak to track and scan the hbus buffer. 
-	 */ -	hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL); +	hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);  	if (!hbus)  		return -ENOMEM;  	hbus->bridge = bridge; +	mutex_init(&hbus->state_lock);  	hbus->state = hv_pcibus_init;  	hbus->wslot_res_allocated = -1; @@ -3563,6 +3682,7 @@ static int hv_pci_probe(struct hv_device *hdev,  	hbus->bridge->domain_nr = dom;  #ifdef CONFIG_X86  	hbus->sysdata.domain = dom; +	hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS);  #elif defined(CONFIG_ARM64)  	/*  	 * Set the PCI bus parent to be the corresponding VMbus @@ -3572,6 +3692,7 @@ static int hv_pci_probe(struct hv_device *hdev,  	 * information to devices created on the bus.  	 */  	hbus->sysdata.parent = hdev->device.parent; +	hbus->use_calls = false;  #endif  	hbus->hdev = hdev; @@ -3579,7 +3700,6 @@ static int hv_pci_probe(struct hv_device *hdev,  	INIT_LIST_HEAD(&hbus->dr_list);  	spin_lock_init(&hbus->config_lock);  	spin_lock_init(&hbus->device_list_lock); -	spin_lock_init(&hbus->retarget_msi_interrupt_lock);  	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,  					   hbus->bridge->domain_nr);  	if (!hbus->wq) { @@ -3633,49 +3753,15 @@ static int hv_pci_probe(struct hv_device *hdev,  	if (ret)  		goto free_fwnode; -retry:  	ret = hv_pci_query_relations(hdev);  	if (ret)  		goto free_irq_domain; -	ret = hv_pci_enter_d0(hdev); -	/* -	 * In certain case (Kdump) the pci device of interest was -	 * not cleanly shut down and resource is still held on host -	 * side, the host could return invalid device status. -	 * We need to explicitly request host to release the resource -	 * and try to enter D0 again. -	 * Since the hv_pci_bus_exit() call releases structures -	 * of all its child devices, we need to start the retry from -	 * hv_pci_query_relations() call, requesting host to send -	 * the synchronous child device relations message before this -	 * information is needed in hv_send_resources_allocated() -	 * call later. 
-	 */ -	if (ret == -EPROTO && enter_d0_retry) { -		enter_d0_retry = false; - -		dev_err(&hdev->device, "Retrying D0 Entry\n"); - -		/* -		 * Hv_pci_bus_exit() calls hv_send_resources_released() -		 * to free up resources of its child devices. -		 * In the kdump kernel we need to set the -		 * wslot_res_allocated to 255 so it scans all child -		 * devices to release resources allocated in the -		 * normal kernel before panic happened. -		 */ -		hbus->wslot_res_allocated = 255; -		ret = hv_pci_bus_exit(hdev, true); +	mutex_lock(&hbus->state_lock); -		if (ret == 0) -			goto retry; - -		dev_err(&hdev->device, -			"Retrying D0 failed with ret %d\n", ret); -	} +	ret = hv_pci_enter_d0(hdev);  	if (ret) -		goto free_irq_domain; +		goto release_state_lock;  	ret = hv_pci_allocate_bridge_windows(hbus);  	if (ret) @@ -3693,12 +3779,15 @@ retry:  	if (ret)  		goto free_windows; +	mutex_unlock(&hbus->state_lock);  	return 0;  free_windows:  	hv_pci_free_bridge_windows(hbus);  exit_d0:  	(void) hv_pci_bus_exit(hdev, true); +release_state_lock: +	mutex_unlock(&hbus->state_lock);  free_irq_domain:  	irq_domain_remove(hbus->irq_domain);  free_fwnode: @@ -3948,20 +4037,26 @@ static int hv_pci_resume(struct hv_device *hdev)  	if (ret)  		goto out; +	mutex_lock(&hbus->state_lock); +  	ret = hv_pci_enter_d0(hdev);  	if (ret) -		goto out; +		goto release_state_lock;  	ret = hv_send_resources_allocated(hdev);  	if (ret) -		goto out; +		goto release_state_lock;  	prepopulate_bars(hbus);  	hv_pci_restore_msi_state(hbus);  	hbus->state = hv_pcibus_installed; +	mutex_unlock(&hbus->state_lock);  	return 0; + +release_state_lock: +	mutex_unlock(&hbus->state_lock);  out:  	vmbus_close(hdev->channel);  	return ret;  |