diff options
Diffstat (limited to 'drivers/net/phy/phy.c')
| -rw-r--r-- | drivers/net/phy/phy.c | 136 | 
1 file changed, 100 insertions, 36 deletions
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index f424b867f73e..25f93a98863b 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -143,13 +143,14 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)   * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation   * is still pending.   */ -static inline int phy_aneg_done(struct phy_device *phydev) +int phy_aneg_done(struct phy_device *phydev)  {  	if (phydev->drv->aneg_done)  		return phydev->drv->aneg_done(phydev);  	return genphy_aneg_done(phydev);  } +EXPORT_SYMBOL(phy_aneg_done);  /* A structure for mapping a particular speed and duplex   * combination to a particular SUPPORTED and ADVERTISED value @@ -261,6 +262,41 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)  }  /** + * phy_supported_speeds - return all speeds currently supported by a phy device + * @phy: The phy device to return supported speeds of. + * @speeds: buffer to store supported speeds in. + * @size:   size of speeds buffer. + * + * Description: Returns the number of supported speeds, and fills the speeds + * buffer with the supported speeds. If speeds buffer is too small to contain + * all currently supported speeds, will return as many speeds as can fit. 
+ */ +unsigned int phy_supported_speeds(struct phy_device *phy, +				  unsigned int *speeds, +				  unsigned int size) +{ +	unsigned int count = 0; +	unsigned int idx = 0; + +	while (idx < MAX_NUM_SETTINGS && count < size) { +		idx = phy_find_valid(idx, phy->supported); + +		if (!(settings[idx].setting & phy->supported)) +			break; + +		/* Assumes settings are grouped by speed */ +		if ((count == 0) || +		    (speeds[count - 1] != settings[idx].speed)) { +			speeds[count] = settings[idx].speed; +			count++; +		} +		idx++; +	} + +	return count; +} + +/**   * phy_check_valid - check if there is a valid PHY setting which matches   *		     speed, duplex, and feature mask   * @speed: speed to match @@ -353,7 +389,7 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)  	phydev->duplex = cmd->duplex; -	phydev->mdix = cmd->eth_tp_mdix_ctrl; +	phydev->mdix_ctrl = cmd->eth_tp_mdix_ctrl;  	/* Restart the PHY */  	phy_start_aneg(phydev); @@ -407,7 +443,7 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,  	phydev->duplex = duplex; -	phydev->mdix = cmd->base.eth_tp_mdix_ctrl; +	phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;  	/* Restart the PHY */  	phy_start_aneg(phydev); @@ -433,7 +469,8 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)  	cmd->transceiver = phy_is_internal(phydev) ?  		
XCVR_INTERNAL : XCVR_EXTERNAL;  	cmd->autoneg = phydev->autoneg; -	cmd->eth_tp_mdix_ctrl = phydev->mdix; +	cmd->eth_tp_mdix_ctrl = phydev->mdix_ctrl; +	cmd->eth_tp_mdix = phydev->mdix;  	return 0;  } @@ -460,7 +497,8 @@ int phy_ethtool_ksettings_get(struct phy_device *phydev,  	cmd->base.phy_address = phydev->mdio.addr;  	cmd->base.autoneg = phydev->autoneg; -	cmd->base.eth_tp_mdix_ctrl = phydev->mdix; +	cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl; +	cmd->base.eth_tp_mdix = phydev->mdix;  	return 0;  } @@ -664,7 +702,7 @@ static void phy_error(struct phy_device *phydev)   * @phy_dat: phy_device pointer   *   * Description: When a PHY interrupt occurs, the handler disables - * interrupts, and schedules a work task to clear the interrupt. + * interrupts, and uses phy_change to handle the interrupt.   */  static irqreturn_t phy_interrupt(int irq, void *phy_dat)  { @@ -673,15 +711,10 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)  	if (PHY_HALTED == phydev->state)  		return IRQ_NONE;		/* It can't be ours.  */ -	/* The MDIO bus is not allowed to be written in interrupt -	 * context, so we need to disable the irq here.  A work -	 * queue will write the PHY to disable and clear the -	 * interrupt, and then reenable the irq line. 
-	 */  	disable_irq_nosync(irq);  	atomic_inc(&phydev->irq_disable); -	queue_work(system_power_efficient_wq, &phydev->phy_queue); +	phy_change(phydev);  	return IRQ_HANDLED;  } @@ -739,10 +772,9 @@ phy_err:  int phy_start_interrupts(struct phy_device *phydev)  {  	atomic_set(&phydev->irq_disable, 0); -	if (request_irq(phydev->irq, phy_interrupt, -				IRQF_SHARED, -				"phy_interrupt", -				phydev) < 0) { +	if (request_threaded_irq(phydev->irq, NULL, phy_interrupt, +				 IRQF_ONESHOT | IRQF_SHARED, +				 phydev_name(phydev), phydev) < 0) {  		pr_warn("%s: Can't get IRQ %d (PHY)\n",  			phydev->mdio.bus->name, phydev->irq);  		phydev->irq = PHY_POLL; @@ -766,12 +798,6 @@ int phy_stop_interrupts(struct phy_device *phydev)  	free_irq(phydev->irq, phydev); -	/* Cannot call flush_scheduled_work() here as desired because -	 * of rtnl_lock(), but we do not really care about what would -	 * be done, except from enable_irq(), so cancel any work -	 * possibly pending and take care of the matter below. -	 */ -	cancel_work_sync(&phydev->phy_queue);  	/* If work indeed has been cancelled, disable_irq() will have  	 * been left unbalanced from phy_interrupt() and enable_irq()  	 * has to be called so that other devices on the line work. 
@@ -784,14 +810,11 @@ int phy_stop_interrupts(struct phy_device *phydev)  EXPORT_SYMBOL(phy_stop_interrupts);  /** - * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes - * @work: work_struct that describes the work to be done + * phy_change - Called by the phy_interrupt to handle PHY changes + * @phydev: phy_device struct that interrupted   */ -void phy_change(struct work_struct *work) +void phy_change(struct phy_device *phydev)  { -	struct phy_device *phydev = -		container_of(work, struct phy_device, phy_queue); -  	if (phy_interrupt_is_valid(phydev)) {  		if (phydev->drv->did_interrupt &&  		    !phydev->drv->did_interrupt(phydev)) @@ -833,6 +856,18 @@ phy_err:  }  /** + * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes + * @work: work_struct that describes the work to be done + */ +void phy_change_work(struct work_struct *work) +{ +	struct phy_device *phydev = +		container_of(work, struct phy_device, phy_queue); + +	phy_change(phydev); +} + +/**   * phy_stop - Bring down the PHY link, and stop checking the status   * @phydev: target phy_device struct   */ @@ -911,6 +946,12 @@ void phy_start(struct phy_device *phydev)  }  EXPORT_SYMBOL(phy_start); +static void phy_adjust_link(struct phy_device *phydev) +{ +	phydev->adjust_link(phydev->attached_dev); +	phy_led_trigger_change_speed(phydev); +} +  /**   * phy_state_machine - Handle the state machine   * @work: work_struct that describes the work to be done @@ -953,7 +994,7 @@ void phy_state_machine(struct work_struct *work)  		if (!phydev->link) {  			phydev->state = PHY_NOLINK;  			netif_carrier_off(phydev->attached_dev); -			phydev->adjust_link(phydev->attached_dev); +			phy_adjust_link(phydev);  			break;  		} @@ -966,7 +1007,7 @@ void phy_state_machine(struct work_struct *work)  		if (err > 0) {  			phydev->state = PHY_RUNNING;  			netif_carrier_on(phydev->attached_dev); -			phydev->adjust_link(phydev->attached_dev); +			phy_adjust_link(phydev);  		} else if (0 
== phydev->link_timeout--)  			needs_aneg = true; @@ -993,7 +1034,7 @@ void phy_state_machine(struct work_struct *work)  			}  			phydev->state = PHY_RUNNING;  			netif_carrier_on(phydev->attached_dev); -			phydev->adjust_link(phydev->attached_dev); +			phy_adjust_link(phydev);  		}  		break;  	case PHY_FORCING: @@ -1009,7 +1050,7 @@ void phy_state_machine(struct work_struct *work)  				needs_aneg = true;  		} -		phydev->adjust_link(phydev->attached_dev); +		phy_adjust_link(phydev);  		break;  	case PHY_RUNNING:  		/* Only register a CHANGE if we are polling and link changed @@ -1038,7 +1079,7 @@ void phy_state_machine(struct work_struct *work)  			netif_carrier_off(phydev->attached_dev);  		} -		phydev->adjust_link(phydev->attached_dev); +		phy_adjust_link(phydev);  		if (phy_interrupt_is_valid(phydev))  			err = phy_config_interrupt(phydev, @@ -1048,7 +1089,7 @@ void phy_state_machine(struct work_struct *work)  		if (phydev->link) {  			phydev->link = 0;  			netif_carrier_off(phydev->attached_dev); -			phydev->adjust_link(phydev->attached_dev); +			phy_adjust_link(phydev);  			do_suspend = true;  		}  		break; @@ -1072,7 +1113,7 @@ void phy_state_machine(struct work_struct *work)  				} else	{  					phydev->state = PHY_NOLINK;  				} -				phydev->adjust_link(phydev->attached_dev); +				phy_adjust_link(phydev);  			} else {  				phydev->state = PHY_AN;  				phydev->link_timeout = PHY_AN_TIMEOUT; @@ -1088,7 +1129,7 @@ void phy_state_machine(struct work_struct *work)  			} else	{  				phydev->state = PHY_NOLINK;  			} -			phydev->adjust_link(phydev->attached_dev); +			phy_adjust_link(phydev);  		}  		break;  	} @@ -1116,6 +1157,15 @@ void phy_state_machine(struct work_struct *work)  				   PHY_STATE_TIME * HZ);  } +/** + * phy_mac_interrupt - MAC says the link has changed + * @phydev: phy_device struct with changed link + * @new_link: Link is Up/Down. + * + * Description: The MAC layer is able indicate there has been a change + *   in the PHY link status. 
Set the new link status, and trigger the + *   state machine, work a work queue. + */  void phy_mac_interrupt(struct phy_device *phydev, int new_link)  {  	phydev->link = new_link; @@ -1348,6 +1398,9 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)  {  	int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised); +	/* Mask prohibited EEE modes */ +	val &= ~phydev->eee_broken_modes; +  	phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val);  	return 0; @@ -1393,3 +1446,14 @@ int phy_ethtool_set_link_ksettings(struct net_device *ndev,  	return phy_ethtool_ksettings_set(phydev, cmd);  }  EXPORT_SYMBOL(phy_ethtool_set_link_ksettings); + +int phy_ethtool_nway_reset(struct net_device *ndev) +{ +	struct phy_device *phydev = ndev->phydev; + +	if (!phydev) +		return -ENODEV; + +	return genphy_restart_aneg(phydev); +} +EXPORT_SYMBOL(phy_ethtool_nway_reset);  |