path: root/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0606-net-phy-improve-handling-delayed-work.patch
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0606-net-phy-improve-handling-delayed-work.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0606-net-phy-improve-handling-delayed-work.patch  144
1 file changed, 144 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0606-net-phy-improve-handling-delayed-work.patch b/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0606-net-phy-improve-handling-delayed-work.patch
new file mode 100644
index 00000000..3e958b58
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0606-net-phy-improve-handling-delayed-work.patch
@@ -0,0 +1,144 @@
+From 673d3ccd258b2f752b025e83ee2495f9456a3bde Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Sun, 3 Feb 2019 01:02:32 +0530
+Subject: [PATCH 0606/2940] net: phy: improve handling delayed work
+
+ Using mod_delayed_work() allows us to simplify the handling of delayed work and
+ removes the need for the sync parameter in phy_trigger_machine().
+ Also introduce a helper phy_queue_state_machine() to encapsulate the
+ low-level delayed work calls. No functional change intended.
+
+From 9f2959b6b52d43326b2f6a0e0d7ffe6f4fc3b5ca
+
+ Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+ Signed-off-by: David S. Miller <davem@davemloft.net>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/phy/phy.c | 37 +++++++++++++++++++++----------------
+ include/linux/phy.h | 2 +-
+ 2 files changed, 22 insertions(+), 17 deletions(-)
+ mode change 100644 => 100755 drivers/net/phy/phy.c
+
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+old mode 100644
+new mode 100755
+index 1ee25877c4d1..c15548ce06b9
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -537,7 +537,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
+ mutex_unlock(&phydev->lock);
+
+ if (trigger)
+- phy_trigger_machine(phydev, sync);
++ phy_trigger_machine(phydev);
+
+ return err;
+ }
+@@ -635,6 +635,13 @@ int phy_speed_up(struct phy_device *phydev)
+ }
+ EXPORT_SYMBOL_GPL(phy_speed_up);
+
++static void phy_queue_state_machine(struct phy_device *phydev,
++ unsigned int secs)
++{
++ mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
++ secs * HZ);
++}
++
+ /**
+ * phy_start_machine - start PHY state machine tracking
+ * @phydev: the phy_device struct
+@@ -647,7 +654,7 @@ EXPORT_SYMBOL_GPL(phy_speed_up);
+ */
+ void phy_start_machine(struct phy_device *phydev)
+ {
+- queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
++ phy_queue_state_machine(phydev, 1);
+ }
+ EXPORT_SYMBOL_GPL(phy_start_machine);
+
+@@ -655,19 +662,14 @@ EXPORT_SYMBOL_GPL(phy_start_machine);
+ * phy_trigger_machine - trigger the state machine to run
+ *
+ * @phydev: the phy_device struct
+- * @sync: indicate whether we should wait for the workqueue cancelation
+ *
+ * Description: There has been a change in state which requires that the
+ * state machine runs.
+ */
+
+-void phy_trigger_machine(struct phy_device *phydev, bool sync)
++void phy_trigger_machine(struct phy_device *phydev)
+ {
+- if (sync)
+- cancel_delayed_work_sync(&phydev->state_queue);
+- else
+- cancel_delayed_work(&phydev->state_queue);
+- queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
++ phy_queue_state_machine(phydev, 0);
+ }
+
+ /**
+@@ -703,7 +705,7 @@ static void phy_error(struct phy_device *phydev)
+ phydev->state = PHY_HALTED;
+ mutex_unlock(&phydev->lock);
+
+- phy_trigger_machine(phydev, false);
++ phy_trigger_machine(phydev);
+ }
+
+ /**
+@@ -745,7 +747,7 @@ static irqreturn_t phy_change(struct phy_device *phydev)
+ mutex_unlock(&phydev->lock);
+
+ /* reschedule state queue work to run as soon as possible */
+- phy_trigger_machine(phydev, true);
++ phy_trigger_machine(phydev);
+
+ if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
+ goto phy_err;
+@@ -909,7 +911,7 @@ void phy_start(struct phy_device *phydev)
+ }
+ mutex_unlock(&phydev->lock);
+
+- phy_trigger_machine(phydev, true);
++ phy_trigger_machine(phydev);
+ }
+ EXPORT_SYMBOL(phy_start);
+
+@@ -1121,11 +1123,14 @@ void phy_state_machine(struct work_struct *work)
+
+ /* Only re-schedule a PHY state machine change if we are polling the
+ * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
+- * between states from phy_mac_interrupt()
++ * between states from phy_mac_interrupt().
++ *
++ * In state PHY_HALTED the PHY gets suspended, so rescheduling the
++ * state machine would be pointless and possibly error prone when
++ * called from phy_disconnect() synchronously.
+ */
+- if (phy_polling_mode(phydev))
+- queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+- PHY_STATE_TIME * HZ);
++ if (phy_polling_mode(phydev) && old_state != PHY_HALTED)
++ phy_queue_state_machine(phydev, PHY_STATE_TIME);
+ }
+
+ /**
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 7086051820f9..2e86acadb9bc 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -1048,7 +1048,7 @@ void phy_change_work(struct work_struct *work);
+ void phy_mac_interrupt(struct phy_device *phydev);
+ void phy_start_machine(struct phy_device *phydev);
+ void phy_stop_machine(struct phy_device *phydev);
+-void phy_trigger_machine(struct phy_device *phydev, bool sync);
++void phy_trigger_machine(struct phy_device *phydev);
+ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+ void phy_ethtool_ksettings_get(struct phy_device *phydev,
+ struct ethtool_link_ksettings *cmd);
+--
+2.17.1
+
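For reference, the change this patch applies can be sketched as a small, hypothetical fragment outside the kernel tree. The demo_* names below are illustrative only and are not part of the patch or of the kernel; the sketch just shows how mod_delayed_work() collapses the former cancel-then-queue sequence and makes the sync flag unnecessary.

/*
 * Minimal illustrative sketch, assuming only the standard kernel
 * workqueue API (<linux/workqueue.h>); all demo_* identifiers are
 * hypothetical.
 */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct delayed_work state_queue;
};

/* Old pattern: pick a cancel variant, then re-queue at the new delay. */
static void demo_trigger_old(struct demo_dev *dev, bool sync)
{
	if (sync)
		cancel_delayed_work_sync(&dev->state_queue);
	else
		cancel_delayed_work(&dev->state_queue);
	queue_delayed_work(system_power_efficient_wq, &dev->state_queue, 0);
}

/* New pattern: one call either adjusts the pending timer or queues the work. */
static void demo_queue_state_machine(struct demo_dev *dev, unsigned int secs)
{
	mod_delayed_work(system_power_efficient_wq, &dev->state_queue,
			 secs * HZ);
}

static void demo_trigger_new(struct demo_dev *dev)
{
	/* Run as soon as possible; no sync parameter is needed anymore. */
	demo_queue_state_machine(dev, 0);
}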